query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
---|---|---|---|
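The columns above describe one retrieval training example per row. As a minimal sketch (assuming the rows are stored as JSON Lines; the file name is hypothetical and not part of this dump), each row can be read into a Python dict with exactly these four fields:

```python
import json

def iter_rows(path="rows.jsonl"):  # hypothetical file name, not part of the dump
    """Yield rows with the four columns listed in the header above."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            # row["query"]     : short natural-language request (string, 9 to ~9.05k chars)
            # row["document"]  : the matching code snippet (string, 10 to ~222k chars)
            # row["negatives"] : 19-20 non-matching code snippets (list of strings)
            # row["metadata"]  : training-objective description (dict), shown after each row
            yield row
```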
Prepare to restart the service
|
def _restart(self):
pass
|
[
"def restart(self):\n self.logger.debug('Server - td-agent-bit - restart call.')\n self.change_service_status(\"restart\")",
"def on_restart(self):\n self.set_state(SupvisorsStates.RESTARTING)",
"def restart(self):\n\t\trun('/etc/init.d/puppet restart')",
"def restart(self):\n cfg.CONF.reload_config_files()\n self.services.restart()",
"def restart():\n terminate()\n build_driver()",
"def service_restart():\n require('root', provided_by=('staging', 'production'))\n with settings(sudo_user=\"root\"):\n sudo('stop cchq_www', user=env.sudo_user)\n sudo('initctl reload-configuration', user=env.sudo_user)\n sudo('start cchq_www', user=env.sudo_user)",
"def restart(message):\n subprocess.run(['service', SERVICE, 'restart'])\n logging.warning('Restart because of {}'.format(message))\n time.sleep(60)",
"def restart_services():\n time.sleep(10)\n os.system('service rsyslog restart \\\n && /opt/omi/bin/service_control restart \\\n && /opt/microsoft/omsagent/bin/service_control restart')",
"def restart(self):\n logging.warning(\"Restarting openbts\")\n envoy.run(\"sudo supervisordctl restart openbts\")",
"def restart(self):\n\t\tbody = dict()\n\t\tbody[\"restart_server\"] = {\n\t\t\t\"stop_type\" : \"soft\",\n \t\t\t\"timeout\" : \"30\",\n \t\t\t\"timeout_action\" : \"destroy\"\n\t\t}\n\t\tself.cloud_manager.post_request(\"/server/\" + self.uuid + \"/restart\" , body)\n\t\tobject.__setattr__(self, \"state\", \"maintenance\") # post_request already handles any errors from API",
"def restart_webserver():\n require('service_name')\n sudo('service nginx reload')\n try:\n sudo('stop %(service_name)s' % env)\n except: # Might be already stopped\n pass\n try:\n sudo('start %(service_name)s' % env)\n except: # Might be already started\n pass",
"def restart():\n restart_uwsgi()\n clear_logs()\n restart_celeryd()",
"def restart():\n supervisor_run(\"restart welt2000\")\n run(\"sleep 1\")\n supervisor_run(\"tail welt2000\")",
"def restart():\n cmd = f'supervisorctl restart pocs-power-server'\n print(f'Running: {cmd}')\n subprocess.run(cmd, shell=True)",
"def test_restart_run(self):\n pass",
"def _restart_studio_service(self, container: model.Container) -> None:\n logger.debug(\"Restarting Studio service\")\n container.restart(\"studio\")\n logger.debug(\"Successfully issued Studio service restart\")",
"def handle_restart_service(self, hermes, intent_message):\n if intent_message.slots and intent_message.slots.snips_service:\n service = intent_message.slots.snips_service.first().value\n else:\n service = ''\n\n self.announce_and_restart_service(intent_message, service)",
"def restart(self, msg, *args):\n\t\t# need to run handoff_all NOT as a greenlet associated with a client\n\t\tself.reply(msg, \"Restarting process\")\n\t\tgevent.spawn(handoff_all)",
"async def restart(self, ctx):\n\t\tlog.warning(f'[{ctx.author.id}][{ctx.author.name}] has initiated a shutdown!')\n\t\tsys.exit(42)",
"def restart_vpnclient():\n logging.debug('restarting pihole')\n #call([\"docker-compose\", \"-f\", \"/home/pi/pihole/docker-compose.yml\", \"stop\"])\n #time.sleep(3)\n #call([\"bash -c \\\"yes | docker system prune \\\"\"])\n #logging.debug('docker stopped and pruned')\n call([\"/usr/bin/systemctl restart vpnclient.service\"], shell=True)\n logging.debug('vpnclient restarted')\n #time.sleep(30)\n #call([\"docker-compose\", \"-f\", \"/home/pi/pihole/docker-compose.yml\", \"start\"])\n #logging.debug('started pihole back')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
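The metadata block repeats for every row and lists a single "triplet" objective over (query, document, negatives). Below is a minimal sketch of how that objective could be expanded into (anchor, positive, negative) tuples; the helper name and the choice to pair the query with each negative individually are assumptions, not stated in the dump.

```python
def make_triplets(row):
    """Expand one row into (anchor, positive, negative) tuples per its metadata."""
    triplets = []
    for anchor_key, positive_key, negatives_key in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_key]              # e.g. the "query" string
        positive = row[positive_key]          # e.g. the "document" code snippet
        for negative in row[negatives_key]:   # each of the 19-20 negative snippets
            triplets.append((anchor, positive, negative))
    return triplets
```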
Convert milliseconds to seconds
|
def millisec_to_sec(self, millisec):
return millisec / 1000
|
[
"def convert_ms(millis):\n millis = int(millis)\n seconds = (millis / 1000) % 60\n seconds = int(seconds)\n minutes = (millis / (1000 * 60)) % 60\n minutes = int(minutes)\n # hours=(millis/(1000*60*60))%24\n min2sec = minutes * 60\n\n return min2sec + seconds",
"def to_seconds(timing, unit):\n return timing*seconds_in_unit(unit)",
"def _ms_human_readable(ms):\n minutes = int(ms / 60000)\n seconds = int((ms - minutes * 60000) / 1000)\n return minutes, seconds",
"def minsec_to_ms(minsec: str):\n\n if(\".\" in minsec):\n mins = int(minsec.split(\".\")[0])\n secs = int(minsec.split(\".\")[1])\n else:\n mins = int(minsec.split(\":\")[0])\n secs = int(minsec.split(\":\")[1])\n\n return float((mins * 60 + secs) * 1000)",
"def datetime_to_ms(dt):\n seconds = calendar.timegm(dt.utctimetuple())\n return seconds * 1000 + int(dt.microsecond / 1000)",
"def seconds2time(my_seconds):\n return (datetime(1970,1,1) + timedelta(seconds=my_seconds)).time()",
"def tstamp_to_milisseconds(self, timestamp):\n \n ftr = [3600, 60, 1] # lista de segundos para transformar hrs, min, seg\n\n # separa horas, minutos e segundos, convert todos para segundos e soma\n return 1000*sum([a*b for a, b in\n zip (ftr, [int(i) for i in\n timestamp.split(\":\")])])",
"def ConvertTimeToSeconds(time):\n splittime = time.split(\":\")\n minutes = int(splittime[0])\n seconds = int(splittime[1])\n timetotal = 60*minutes + seconds\n return timetotal",
"def ms(self, t):\n return t // 1000000",
"def to_sec(duration):\n return duration.secs + duration.nsecs * 10 ** -9",
"def to_seconds(*, hours=0, minutes=0, seconds=0) -> int:\n assert isinstance(hours, int), TypeError\n assert isinstance(minutes, int), TypeError\n assert isinstance(seconds, int), TypeError\n return hours * 3600 + minutes * 60 + seconds",
"def to_seconds(self, dt):\n return int((dt - datetime(1970, 1, 1)).total_seconds())",
"def seconds(**kwargs: int) -> int:\n return math.ceil(datetime.timedelta(**kwargs).total_seconds())",
"def convert_time_to_seconds(time_string):\n if time_string[-1] == 's':\n return int(time_string[:-1])\n else:\n denominations = [int(t) for t in time_string.split(':')]\n converts = [60**i for i in reversed(range(len(denominations)))]\n return sum([c*d for c, d in zip(converts, denominations)])",
"def clip_time_seconds(string):\n if (string == None) or (len(string) < 1):\n return 0\n value = 0\n if \"ms\" in string:\n value = float(string.replace(\"ms\", \"\")) * 0.001\n elif \"s\" in string:\n value = float(string.replace(\"s\", \"\"))\n elif \"h\" in string:\n value = float(string.replace(\"h\", \"\")) * 3600\n elif \"min\" in string:\n value = float(string.replace(\"min\", \"\")) * 60\n else:\n v_h = 0\n v_m = 0\n v_s = 0\n v_d = 0\n str_hms = string\n if \".\" in str_hms:\n str_hms, str_d = str_hms.split(\".\")\n if len(str_d) > 0:\n v_d = 1.0 * int(str_d) / (10 ** len(str_d))\n arr_hms = str_hms.split(\":\")\n v_n = len(arr_hms)\n if v_n >= 1:\n v_s = int(arr_hms[-1])\n if v_n >= 2:\n v_m = int(arr_hms[-2])\n if v_n >= 3:\n v_h = int(arr_hms[-3])\n value = v_h * 3600 + v_m * 60 + v_s + v_d\n return value",
"def htk_to_ms(htk_time):\n if type(htk_time)==type(\"string\"):\n htk_time = float(htk_time)\n return htk_time / 10000.0",
"def slurm_time_to_seconds(time:str) -> int:\n # Get rid of the milliseconds and change the separator for day to hours from \"-\" to \":\"\n time_tmp = (time.replace(\"-\",\":\")).rsplit('.',1)[0]\n # Split each units of time (seconds, minutes, hours and days) and convert them into seconds before adding them together.\n seconds=sum(x * int(t) for x, t in zip([1, 60, 3600, 86400], reversed(time_tmp.split(\":\"))))\n return seconds",
"def epoch_timestamp_to_ms_timestamp(ts: int) -> int:\n return int(ts * 1000)",
"def htk_to_ms(htk_time):\n if type(htk_time)==type(\"string\"):\n htk_time = float(htk_time)\n return htk_time / 50000.0",
"def to_seconds(**kwargs):\n\n time_converter_map = {\"years\": Cache.years_to_seconds,\n \"months\": Cache.months_to_seconds,\n \"weeks\": Cache.weeks_to_seconds,\n \"days\": Cache.days_to_seconds,\n \"hours\": Cache.hours_to_seconds,\n \"minutes\": Cache.minutes_to_seconds,\n \"seconds\": Cache.seconds_to_seconds}\n\n # converts keywords arguments to seconds\n seconds = []\n\n for key, value in list(kwargs.items()):\n if key in time_converter_map:\n seconds.append(time_converter_map[key](value))\n else:\n msg = \"invalid time argument: %s\" % key\n raise TimeError(msg)\n \n return sum(seconds)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Current time in milliseconds.
|
def current_time_millis(self):
return int(round(time.time() * 1000))
|
[
"def get_time(self):\r\n return float(self._cur_time)",
"def current_time_ns():\n return int(time.time() * (10 ** 9))",
"def millis():\r\n return int(round(time.time() * 1000))",
"def get_current_time(self):\n return datetime.datetime.now().strftime(\"%H:%M:%S\")",
"def millis():\r\n return time.time()*1000 - START_TIME_MS",
"def time():\n return datetime.datetime.now()",
"def current_time(self):\n\n method = \"global.getCurrentTime\"\n r = self.request(method=method)\n if r['result'] is False:\n raise RequestError(str(r))\n\n return r['params']['time']",
"def getCurrentTime():\n\n time = datetime.datetime.now().time().strftime('%I:%M %p')\n return time",
"def system_time(self):\n return datetime.utcfromtimestamp(\n self.request('time_facade.SystemTimeMillis') / 1000.0)",
"def time(self):\n return ((self['clock']['initial'] + 40 * self['clock']['increment'])\n / 60)",
"def get_current_time(self):\n if not self.is_data_set():\n return -1\n return self._interval * self._sample_number",
"def get_time(self): # TEST\n return self._game.get_time()",
"def get_physical_time():\n return datetime.now().timestamp()",
"def get_current_time():\r\n return datetime.now().strftime(\"%B %d, %Y %H:%M\")",
"def time_t(self) -> int:\n return self._time_t",
"def time_wall(self):\n return time.time() - self.time_start",
"def ms(self, t):\n return t // 1000000",
"def current_time(self):\n # Get Current time\n current_time = tzlocal.get_localzone().localize(datetime.now())\n utc_dt = current_time.astimezone(pytz.utc)\n unixtime = int(time.mktime(utc_dt.timetuple()))\n readable_time = current_time.strftime('%Y-%m-%d %H:%M:%S')\n return unixtime",
"def now(self) -> int:\n return self._context.block.timestamp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create heights and times that match the hourly UKV extract as UKV data is not used in forward_operator_from_obs()
|
def create_heights_and_times(day):
# heights taken from the UKV
height = np.array([ 5.00000000e+00, 2.16666641e+01, 4.50000000e+01,
7.50000000e+01, 1.11666679e+02, 1.55000000e+02,
2.05000000e+02, 2.61666687e+02, 3.25000000e+02,
3.95000000e+02, 4.71666809e+02, 5.55000000e+02,
6.45000000e+02, 7.41666809e+02, 8.45000000e+02,
9.55000000e+02, 1.07166675e+03, 1.19500000e+03,
1.32500000e+03, 1.46166675e+03, 1.60500000e+03,
1.75500000e+03, 1.91166675e+03, 2.07500000e+03,
2.24500049e+03, 2.42166675e+03, 2.60500000e+03,
2.79500000e+03, 2.99166675e+03, 3.19500000e+03,
3.40500000e+03, 3.62166675e+03, 3.84500000e+03,
4.07500000e+03, 4.31166797e+03, 4.55500000e+03,
4.80500000e+03, 5.06166797e+03, 5.32500000e+03,
5.59500000e+03, 5.87166797e+03, 6.15500781e+03,
6.44514795e+03, 6.74249219e+03, 7.04781592e+03,
7.36235986e+03, 7.68791992e+03, 8.02692822e+03,
8.38258008e+03, 8.75891602e+03, 9.16094434e+03,
9.59475977e+03, 1.00676680e+04, 1.05883076e+04,
1.11667959e+04, 1.18148682e+04, 1.25460244e+04,
1.33756758e+04, 1.43213203e+04, 1.54027041e+04,
1.66419844e+04, 1.80639082e+04, 1.96960273e+04,
2.15688516e+04, 2.37160645e+04, 2.61747168e+04,
2.89854609e+04, 3.21927324e+04, 3.58450039e+04,
4.00000000e+04])
# match resolution of typically extracts UKV data (hourly)
time = eu.date_range(day, day+dt.timedelta(hours=24), 60, 'minutes')
return height, time
|
[
"def compute_state_energies_vs_time( hvib ):\n nsteps = len(hvib) \n nstates = hvib[0].num_of_rows\n energies = []\n for state in range( nstates ):\n energies.append( [] )\n for step in range( nsteps ):\n energies[ state ].append( hvib[ step ].get( state, state ).real - hvib[ step ].get( 0, 0 ).real )\n return np.array( energies )",
"def read_wxt_obs(day, time, z):\n\n filepath = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \\\n 'Davis_BGH_' + day.strftime('%Y') + '_15min.nc'\n wxt_obs = eu.netCDF_read(filepath, vars=['time', 'RH', 'Tair', 'press'])\n\n # extract out RH obs to match mod_time\n # pull out ALL the nearest time idxs and differences\n # the mod_data time is the same for all sites so can therefore use any site\n t_idx = np.array([eu.nearest(wxt_obs['time'], t)[1] for t in time])\n t_diff = np.array([eu.nearest(wxt_obs['time'], t)[2] for t in time])\n\n wxt_obs['RH'] = wxt_obs['RH'][t_idx] # [%]\n wxt_obs['Tair'] = wxt_obs['Tair'][t_idx] # [degC]\n wxt_obs['press'] = wxt_obs['press'][t_idx] # [hPa]\n wxt_obs['time'] = wxt_obs['time'][t_idx]\n # wxt_obs['rawtime'] = wxt_obs['rawtime'][t_idx]\n\n # overwrite t_idx locations where t_diff is too high with nans\n # only keep t_idx values where the difference is below 1 hour\n bad = np.array([abs(i.days * 86400 + i.seconds) > 60 * 60 for i in t_diff])\n\n wxt_obs['RH'][bad] = np.nan\n wxt_obs['Tair'][bad] = np.nan\n wxt_obs['press'][bad] = np.nan\n\n wxt_obs['time'][bad] = np.nan\n # wxt_obs['rawtime'][bad] = np.nan\n\n # create RH_frac using RH data\n wxt_obs['RH_frac'] = wxt_obs['RH'] / 100.0\n\n # calculate extra variables\n e_s_hpa = 6.112 * (np.exp((17.67 * wxt_obs['Tair']) / (wxt_obs['Tair'] + 243.5))) # [hPa] # sat. v. pressure\n e_s = e_s_hpa * 100.0 # [Pa] # sat. v. pressure\n wxt_obs['e'] = wxt_obs['RH_frac'] * e_s # [Pa] # v. pressure\n wxt_obs['r_v'] = wxt_obs['e'] / (1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) # water_vapour mixing ratio [kg kg-1]\n wxt_obs['q'] = wxt_obs['e'] / ((1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) + wxt_obs['e']) # specific humidity [kg kg-1]\n wxt_obs['Tv'] = (1 + (0.61 * wxt_obs['q'])) * (wxt_obs['Tair'] + 273.15) # virtual temp [K]\n wxt_obs['air_density'] = (wxt_obs['press']*100.0) / (286.9 * wxt_obs['Tv'])# [kg m-3]\n\n # extend the wxt obs in height to match the dimensions of model RH\n # copy the obs so it is the same at all heights\n for var, item in wxt_obs.iteritems():\n if var not in ['time', 'rawtime']:\n # wxt_obs[var] = np.transpose(np.tile(item, (int(rh_frac.shape[1]), 1)))\n wxt_obs[var] = np.transpose(np.tile(item, (int(z.shape[-1]), 1)))\n\n return wxt_obs",
"def _parse_hdus(cls, hdulist):\n # Open file with PyFITS\n fits_record = hdulist[1].data\n\n metadata = MetaDict(OrderedDict(hdulist[0].header))\n start_str = metadata.get('date-obs', metadata.get('date_obs', ''))\n start = parse_time(start_str)\n\n # First column are times. For level 2 data, the units are [s].\n # For level 3 data, the units are [min]\n if hdulist[1].header['TUNIT1'] == 's':\n times = start + TimeDelta(fits_record.field(0)*u.second)\n elif hdulist[1].header['TUNIT1'] == 'MIN':\n td = [int(n) for n in fits_record.field(0)]\n times = start + TimeDelta(td*u.minute)\n else:\n raise ValueError(\"Time unit in LYRA fits file not recognised. \"\n \"Value = {}\".format(hdulist[1].header['TUNIT1']))\n\n # Rest of columns are the data\n table = {}\n\n for i, col in enumerate(fits_record.columns[1:-1]):\n # temporary patch for big-endian data bug on pandas 0.13\n if fits_record.field(i+1).dtype.byteorder == '>' and sys.byteorder == 'little':\n table[col.name] = fits_record.field(i + 1).byteswap().newbyteorder()\n else:\n table[col.name] = fits_record.field(i + 1)\n\n # Return the header and the data\n times.precision = 9\n data = pandas.DataFrame(table, index=times.isot.astype('datetime64'))\n data.sort_index(inplace=True)\n\n # Add the units data\n units = OrderedDict([('CHANNEL1', u.W/u.m**2),\n ('CHANNEL2', u.W/u.m**2),\n ('CHANNEL3', u.W/u.m**2),\n ('CHANNEL4', u.W/u.m**2)])\n # TODO: check: http://www.wmo-sat.info/oscar/instruments/view/733\n return data, metadata, units",
"def accumulate24Hourly(data):\n newTimeValues=[]\n taxis=data.getTime()\n tunits=data.units\n print len(data.getTime())\n newarray=[]\n\n for i in range((tlen/2)):\n p1=data(time=slice(i,i+1))\n p2=data(time=slice(i+1,i+2))\n accum=p1+p2\n newarray.append(accum)\n newTimeValues.append(p2.getTime()[0])\n\n array=MA.concatenate(newarray)\n array=MA.array(array, 'f', fill_value=data.getMissing())\n axes=data.getAxisList()\n newTimeAxis=cdms.createAxis(newTimeValues)\n newTimeAxis.units=tunits\n newTimeAxis.designateTime()\n newTimeAxis.id=newTimeAxis.long_name=newTimeAxis.title=\"time\"\n \n newaxes=[newTimeAxis]+axes[1:]\n var=cdms.createVariable(array, axes=newaxes, id=data.id)\n for att in (\"units\", \"long_name\"):\n setattr(var, att, getattr(data, att))\n return var",
"def compute_tb_muhuurtas(self):\n if getattr(self, \"jd_sunrise\", None) is None:\n self.compute_sun_moon_transitions()\n tb_muhuurtas = []\n for muhuurta_id in range(0, 15):\n (jd_start, jd_end) = interval.get_interval(start_jd=self.jd_sunrise, end_jd=self.jd_sunset,\n part_index=muhuurta_id, num_parts=15).to_tuple()\n from jyotisha.panchaanga.temporal.interval import TbSayanaMuhuurta\n tb_muhuurtas.append(TbSayanaMuhuurta(\n jd_start=jd_start, jd_end=jd_end,\n muhuurta_id=muhuurta_id))\n self.day_length_based_periods.tb_muhuurtas = tb_muhuurtas",
"def twophotonHRead():\n xuvtop = os.environ['XUVTOP']\n fName = os.path.join(xuvtop, 'continuum', 'hseq_2photon.dat')\n dFile = open(fName, 'r')\n a = dFile.readline()\n y0 = np.asarray(a.split())\n a = dFile.readline()\n z0 = np.asarray(a.split())\n nz = 30\n avalue = np.zeros(nz, 'float64')\n asum = np.zeros(nz, 'float64')\n psi0 = np.zeros((nz, 17), 'float64')\n for iz in range(nz):\n a = dFile.readline().split()\n avalue[iz] = float(a[1])\n asum[iz] = float(a[2])\n psi = np.asarray(a[3:])\n psi0[iz] = psi\n dFile.close()\n return {'y0':y0, 'z0':z0, 'avalue':avalue, 'asum':asum, 'psi0':psi0.reshape(30, 17)}",
"def extract_hour_data(df):\n\n all_df = []\n cols = df.columns\n assert cols[4] == '1'\n assert cols[27] == '24'\n for i in range(df.shape[0]):\n test_s = df.iloc[i, 4:28]\n test_df = pd.DataFrame(data=test_s.values, index=test_s.index, columns=['energy'])\n test_df['date'] = df.index[i]\n test_df['month'] = df.index[i].month\n test_df['year'] = df.index[i].year\n test_df['day'] = df.index[i].day\n test_df['hour'] = test_s.index.astype(np.int)\n test_df['measure_type'] = df.iloc[i, 0].astype(np.int)\n all_df.append(test_df)\n\n all_df = pd.concat(all_df)\n ndf = pd.to_datetime(all_df[['month', 'year', 'day', 'hour']], utc=False)\n all_df.index = pd.DatetimeIndex(ndf.values, tz='Japan')\n return all_df[['energy', 'measure_type']]",
"def time_entries():\n return {'test_project': 12.21 * 3600, 'pseudo_project': 1000}",
"def hourly_by_cols(hourly_wdb, width, height, sun_wdb, COLORS, col_width=5):\n # this does not belong here. Need to figure oout how to kill it...\n # import utils.utilities as utils\n # import printers.colorfuncs as cf\n # begin main functioning!\n res = []\n _keys = [\"Temp\", \"Cloud %\", \"Precip Chance\", \"Wind speed\",\n \"Sunrise/set\", \"Time\"]\n head = max(list(len(z) for z in _keys))\n ind_slice = (width - head - 2) // col_width\n # build the basic info strings\n # NOTE: fix the mix of tuples and lists...\n format_table = [[\"Temp\", ('temp', 'english'), cf.bar_temp_color, 11],\n (\"Cloud %\", ('sky', ), cf.bar_cloud_color, 1),\n (\"Precip Chance\", ('pop', ), cf.bar_precip_color, 1),\n (\"Wind speed\", ('wspd', 'english'), cf.bar_wind_color, 1)]\n # here we make sure we don't return something higher tha=n the screen\n # NOTE:\n # the magic '3' will disappear once the whole format_table gets built\n # according to a formatting function\n table_height = sum(list(f[3] for f in format_table)) + 3\n if table_height > height:\n format_table[0][3] = max(1, format_table[0][3] - (table_height\n - height))\n for r in format_table:\n _lis = list(utils.eat_keys(\n hour, r[1]) for hour in hourly_wdb)\n temp, star_ind = cols_formatter(_lis[:ind_slice],\n COLORS, r[2], r[3], col_width)\n for lin in range(len(temp)):\n res.append(\"{}{}\".format(\"{:>{wid}}{}\".format(r[0], \": \", wid=head)\n if lin == star_ind else \" \" * (head + 2), temp[lin]))\n # build the sunrise/sunset string\n # build the alternate sunrise/sunset string\n temp = phutils.new_sunrise_line(hourly_wdb[:ind_slice], sun_wdb, COLORS,\n col_width=col_width, head=head)\n res.append(temp)\n # build the time string\n # TEST 1\n # sunrise = (sun_wdb['sunrise']['hour'], sun_wdb['sunrise']['minute'])\n # sunset = (sun_wdb['sunrise']['hour'], sun_wdb['sunrise']['minute'])\n # color_func = cf.sunrise_sunset_color\n # color_func_vars = [sunrise, sunset, COLORS]\n # TEST 2\n # color_func = cf.new_sunrise_sunset_color\n # color_func_vars = [sunrise, sunset, COLORS]\n # TEST 3\n color_func = cf.alternating_background\n color_func_vars = [lambda x, y: x % 2 == 1, COLORS]\n # TEST 3 1/2\n # color_func_vars = [lambda x, y: x < 10, COLORS]\n temp = \"\".join(list(phutils.time_format_generator(hourly_wdb[:ind_slice],\n \"Time\", head,\n col_width,\n color_func, COLORS.clear,\n *color_func_vars)))\n # build the alternate time string\n res.append(temp)\n # insert a line before and after? No... let's not\n # res.insert(0, '-' * (head + col_width * ind_slice))\n # res.append('-' * (head + col_width * ind_slice))\n # return the result!\n return res",
"def parse_wu_table(yr, mo, dy):\n\n # -- set the file\n html = os.path.join(\"output\", \"wunderhtml\",\n \"DailyHistory_{0:04}_{1:02}_{2:02}.html\" \\\n .format(yr, mo, dy))\n fopen = open(html, \"r\")\n soup = bs4.BeautifulSoup(fopen, \"html.parser\")\n\n # -- get header\n hdr = [i.text for i in soup.find(\"table\",\n attrs={\"class\" : \"obs-table responsive\"}) \\\n .find(\"thead\").find_all(\"tr\")[0].find_all(\"th\")]\n\n # -- get the hourly weather table from html\n rows = soup.find(\"table\", attrs={\"class\" : \"obs-table responsive\"}) \\\n .find(\"tbody\").find_all(\"tr\")\n tbl = [[ele.text.strip() for ele in row.find_all(\"td\")] for row in rows]\n fopen.close()\n\n # -- convert to dataframe\n if any([\"EDT\" in i for i in hdr]):\n cols = [\"Time (EDT)\", \"Temp.\", \"Humidity\", \"Precip\"]\n else:\n cols = [\"Time (EST)\", \"Temp.\", \"Humidity\", \"Precip\"]\n data = pd.DataFrame(tbl, columns=hdr)[cols]\n data.columns = [\"time\", \"temp\", \"humidity\", \"precip\"]\n \n # -- parse columns\n def time_to_datetime(tstr):\n \"\"\" Convert Weather Underground EST to datetime. \"\"\"\n\n return datetime.datetime.strptime(\"{0:04}/{1:02}/{2:02} \" \\\n .format(yr, mo, dy) + tstr,\n \"%Y/%m/%d %I:%M %p\")\n\n data[\"time\"] = data[\"time\"].apply(time_to_datetime)\n data[\"temp\"] = pd.to_numeric(data[\"temp\"] \\\n .apply(lambda x: x.encode(\"ascii\", \"ignore\") \\\n .replace(\"F\", \"\")), errors=\"coerce\")\n data[\"humidity\"] = pd.to_numeric([i[:-1] for i in\n data[\"humidity\"]], errors=\"coerce\")\n data[\"precip\"] = [0.0 if i == \"N/A\" else float(i[:-3]) for i in\n data[\"precip\"]]\n\n # -- add daily precipitation\n data[\"daily_precip\"] = [parse_daily_precipitation(soup)] * len(data)\n\n return data",
"def extract_time_variants(self):\n\n for scenario in self.scenarios_to_run:\n self.scaleup_data[scenario] = {}\n for parameter in self.time_variant_parameters:\n self.scaleup_data[scenario][parameter] = copy.copy(self.time_variant_parameters[parameter])",
"def integration_times(hdulist):\n int_times = hdulist['INT_TIMES'].data\n starting = int_times['int_start_MJD_UTC']\n mid = int_times['int_mid_MJD_UTC']\n ending = int_times['int_end_MJD_UTC']\n return starting, mid, ending",
"def get_sample_1940_hh():\n hh_line = \"H19400200024278096700000001000009100000000001198632410100102100000009999000260300026007000840199990012200020999999901223233100110101000000001000900000000100090\"\n return hh_line",
"def getObsTime(vscan,area,mode='optimal'):\n \n # Verify the mode\n if (mode != 'optimal'):\n print(\"Observing mode parameter is not in allowed values\")\n print(\"Allowed values are [optimal]\")\n return\n \n if (mode == 'optimal'):\n pixSizeBand = getPixSizeBand(over=useDef)\n bbopFoV = getGeomQuant(pixSizeBand)[0]\n # we're in the mode were we have found the optimal compromise between on-source\n # time and map execution time\n # Formulas used below come from document BBOP-DAp-RP-002\n # The formula for the obsTime may be slightly wrong because \n # scanlegs do not come in fraction so somewhere there should be rounding\n # to the integer immediately above. Not considered major here.\n onSrcTime = bbopFoV / (0.84*vscan)\n obsTime = area * (3600./vscan) * (1.2 * (3600./bbopFoV+0.54))\n # determines the number of scan legs required for this observation\n # the +2 is because I cannot accept a fractional number of legs spacings\n # so the map is overdimensioned (+1 w.r.t to the integral part of the ratio\n # between map widths and leg spacing), and then for a given nunber of spacings\n # we have to make +1 number of legs\n nLegs = int(3600.*math.sqrt(area)/(bbopFoV*0.84))+2\n #print nLegs\n # add the turn around overhead\n # for each leg of the scan we need to accelerate to it and brake at the end\n # this assumes that spacecraft is delivered standing still to begin the \n # observation, and is positioned standing still at the end of the obs.\n # We need then one step motion betwen the legs, so one less than the \n # number of legs. \n # Since here we are computing the time for 1 map, we assume the observing \n # time clock stops at the end of the map.\n obsTimeFull = obsTime + (nLegs-1)*timeStep + nLegs*2*timeBrakeAccel\n # add the initial overhead\n obsTimeFull += initOverhead\n \n return [onSrcTime,obsTimeFull, obsTime/obsTimeFull,nLegs]",
"def recreate_mag_stats_test_data(filename=TEST_DATA_DIR / 'mag-stats.h5'):\n from astropy.table import vstack\n\n if os.path.exists(filename):\n os.unlink(filename)\n\n star_obs_catalogs.load()\n mp_starcat_time = [\n '2011:288:06:14:49.501',\n '2021:015:00:01:45.585',\n '2021:089:02:48:00.575',\n '2021:201:02:58:03.250',\n '2018:296:15:53:14.596',\n ]\n STARS_OBS = star_obs_catalogs.STARS_OBS[\n np.in1d(star_obs_catalogs.STARS_OBS['mp_starcat_time'], mp_starcat_time)\n ]\n STARS_OBS = STARS_OBS.group_by('agasc_id')\n STARS_OBS.add_index('agasc_id')\n STARS_OBS.write(\n filename,\n path='/obs_status/cat/STARS_OBS',\n serialize_meta=True,\n append=True,\n overwrite=True\n )\n\n telem = mag_estimate.get_telemetry_by_agasc_id(10492752)\n\n # the starting times might be a bit arbitrary. They are chose to remove the first points, which\n # might be in maneuver mode or in acquisition. These times come from the kadi events v1 version,\n # but they do not matter much.\n telem_by_obsid = [\n telem[(telem['obsid'] == 12800) & (telem['times'] > 435047672.)][:100],\n # only 10 points, excluding the beginning\n telem[(telem['obsid'] == 23681) & (telem['times'] > 727057549.)][:10],\n telem[(telem['obsid'] == 23682) & (telem['times'] > 733462165.)][:100],\n telem[(telem['obsid'] == 23683) & (telem['times'] > 743139160.)][:100],\n telem[(telem['obsid'] == 48900) & (telem['times'] > 656698074.)][:100],\n ]\n telem_by_obsid[-1]['mags_img'] += 0.01 * np.exp(np.arange(100) / 20)\n telem_by_obsid[-1]['mags'] += 0.01 * np.exp(np.arange(100) / 20)\n t = vstack(telem_by_obsid)\n\n t.write(filename, path='/obs_status/telem', serialize_meta=True, append=True)",
"def time_match_pm_RH_dN(pm2p5_mass_in, pm10_mass_in, met_in, dN_in, timeRes):\n\n ## 1. set up dictionaries with times\n # Match data to the dN data.\n # time range - APS time res: 5 min, DMPS time res: ~12 min\n start_time = dN_in['time'][0]\n end_time = dN_in['time'][-1]\n time_range = eu.date_range(start_time, end_time, timeRes, 'minutes')\n\n # make sure datetimes are in UTC\n from_zone = tz.gettz('GMT')\n to_zone = tz.gettz('UTC')\n time_range = np.array([i.replace(tzinfo=from_zone) for i in time_range])\n\n\n # set up dictionaries (just with time and any non-time related values at the moment)\n pm2p5_mass = {'time': time_range}\n pm10_mass = {'time': time_range}\n dN = {'time': time_range, 'D': dN_in['D'], 'dD': dN_in['dD'],\n 'grimm_idx': dN_in['grimm_idx'], 'smps_idx': dN_in['smps_idx'],\n 'grimm_geisinger_idx': dN_in['grimm_geisinger_idx'], 'smps_geisinger_idx': dN_in['smps_geisinger_idx']}\n met = {'time': time_range}\n\n ## 2. set up empty arrays within dictionaries\n # prepare empty arrays within the outputted dictionaries for the other variables, ready to be filled.\n for var, var_in in zip([pm2p5_mass, pm10_mass, met, dN], [pm2p5_mass_in, pm10_mass_in, met_in, dN_in]):\n\n for key in var_in.iterkeys():\n # only fill up the variables\n if key not in ['time', 'D', 'dD', 'grimm_idx', 'smps_idx', 'grimm_geisinger_idx', 'smps_geisinger_idx']:\n\n # make sure the dimensions of the arrays are ok. Will either be 1D (e.g RH) or 2D (e.g. dN)\n dims = var_in[key].ndim\n if dims == 1:\n var[key] = np.empty(len(time_range))\n var[key][:] = np.nan\n else:\n var[key] = np.empty((len(time_range), var_in[key].shape[1]))\n var[key][:] = np.nan\n\n\n ## 3. fill the variables with time averages\n # use a moving subsample assuming the data is in ascending order\n for var, var_in in zip([pm2p5_mass, pm10_mass, met, dN], [pm2p5_mass_in, pm10_mass_in, met_in, dN_in]):\n\n # set skip idx to 0 to begin with\n # it will increase after each t loop\n skip_idx = 0\n\n for t in range(len(time_range)):\n\n\n # find data for this time\n binary = np.logical_and(var_in['time'][skip_idx:skip_idx+100] > time_range[t],\n var_in['time'][skip_idx:skip_idx+100] <= time_range[t] + dt.timedelta(minutes=timeRes))\n\n # actual idx of the data within the entire array\n skip_idx_set_i = np.where(binary == True)[0] + skip_idx\n\n # create means of the data for this time period\n for key in var.iterkeys():\n if key not in ['time', 'D', 'dD', 'grimm_idx', 'grimm_geisinger_idx', 'smps_idx', 'smps_geisinger_idx']:\n\n dims = var_in[key].ndim\n if dims == 1:\n var[key][t] = np.nanmean(var_in[key][skip_idx_set_i])\n else:\n var[key][t, :] = np.nanmean(var_in[key][skip_idx_set_i, :], axis=0)\n\n # change the skip_idx for the next loop to start just after where last idx finished\n if skip_idx_set_i.size != 0:\n skip_idx = skip_idx_set_i[-1] + 1\n\n\n ## 4. 
nan across variables for missing data\n # make data for any instance of time, t, to be nan if any data is missing from dN, met or pm mass data\n\n ## 4.1 find bad items\n # make and append to a list, rows where bad data is present, across all the variables\n bad = []\n\n for var in [pm2p5_mass, pm10_mass, met, dN]:\n\n for key, data in var.iteritems():\n\n if key not in ['time', 'D', 'dD', 'grimm_idx', 'grimm_geisinger_idx', 'smps_idx', 'smps_geisinger_idx']:\n\n # number of dimensions for data\n dims = data.ndim\n if dims == 1:\n for t in range(len(time_range)):\n if np.isnan(data[t]):\n bad += [t]\n\n else:\n for t in range(len(time_range)):\n if any(np.isnan(data[t, :]) == True): # any nans in the row\n bad += [t] # store the time idx as being bad\n\n ## 4.2 find unique bad idxs and make all values at that time nan, across all the variables\n bad_uni = np.unique(np.array(bad))\n\n for var in [pm2p5_mass, pm10_mass, met, dN]:\n\n for key, data in var.iteritems():\n\n if key not in ['time', 'D', 'dD', 'grimm_idx', 'grimm_geisinger_idx', 'smps_idx', 'smps_geisinger_idx']:\n\n # number of dimensions for data\n dims = data.ndim\n if dims == 1:\n var[key][bad_uni] = np.nan\n\n else:\n var[key][bad_uni, :] = np.nan\n\n\n\n return pm2p5_mass, pm10_mass, met, dN",
"def create_dict(timespan, extremes, numbeats, mean_hr, beat_times):\n logging.info(\"Assigning dictionary entries\")\n metrics = {\"duration\": timespan, \"voltage_extremes\": extremes,\n \"num_beats\": numbeats, \"mean_hr_bpm\": mean_hr,\n \"beats\": beat_times}\n return metrics",
"def subsample_hours(ds):\n samples = chunk_by_hours(ds)\n keys = get_all_keys(samples[0])\n\n s = []\n for sample in samples:\n d = {}\n for key in keys:\n if key == 'Date':\n d[key] = min(get_values_by_key(sample, key))\n else:\n d[key] = np.mean(get_values_by_key(sample, key))\n s.append(d)\n return s",
"def get_Hu():\n \n ue = np.zeros((nx+1,ny)) \n uw = np.zeros((nx+1,ny))\n un = np.zeros((nx+1,ny))\n us = np.zeros((nx+1,ny))\n vn = np.zeros((nx+1,ny))\n vs = np.zeros((nx+1,ny))\n τxxe = np.zeros((nx+1,ny))\n τxxw = np.zeros((nx+1,ny))\n τxyn = np.zeros((nx+1,ny))\n τxys = np.zeros((nx+1,ny))\n Hu = np.zeros((nx+1,ny))\n \n i = np.arange(1,nx) # u-cell centers in domain interior\n \n ue[i,:] = (u[i+1,:] + u[i,:])/2\n uw[i,:] = (u[i,:] + u[i-1,:])/2\n \n j = np.arange(0,ny-1)\n un[IJ(i,j)] = (u[IJ(i,j+1)] + u[IJ(i,j)])/2\n un[i,ny-1] = ubc_t\n j = np.arange(1,ny)\n us[IJ(i,j)] = (u[IJ(i,j)] + u[IJ(i,j-1)])/2\n us[i,0] = ubc_b\n \n j = np.arange(0,ny)\n vn[IJ(i,j)] = (v[IJ(i-1,j+1)]+v[IJ(i,j+1)])/2\n vs[IJ(i,j)] = (v[IJ(i-1,j)] +v[IJ(i,j)]) /2\n \n τxxe[i,:] = -2*ν*(u[i+1,:] - u[i,:]) /Δx\n τxxw[i,:] = -2*ν*(u[i,:] - u[i-1,:])/Δx\n \n j = np.arange(0,ny-1)\n τxyn[IJ(i,j)] = -ν*(u[IJ(i,j+1)]-u[IJ(i,j)])/Δy - ν*(v[IJ(i,j+1)]-v[IJ(i-1,j+1)])/Δx\n τxyn[i,ny-1] = -ν*(ubc_t-u[i,ny-1])/(Δy/2) - ν*(v[i,ny]-v[i-1,ny])/Δx \n \n j = np.arange(1,ny)\n τxys[IJ(i,j)] = -ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy - ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx\n τxys[i,0] = -ν*(u[i,0]-ubc_b)/(Δy/2) - ν*(v[i,0]-v[i-1,0])/Δx\n \n Hu[i,:] = -((ue[i,:]*ue[i,:] - uw[i,:]*uw[i,:])/Δx + (un[i,:]*vn[i,:] - us[i,:]*vs[i,:])/Δy) \\\n -((τxxe[i,:] - τxxw[i,:])/Δx + (τxyn[i,:] - τxys[i,:])/Δy)\n \n return Hu"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create the S array from the climatology (month, RH_fraction) given the month and RH
|
def get_S_climatology(time, rh_frac, ceil_lam):
# 1. Read in the data
filename = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/' + \
'S_climatology_NK_SMPS_APS_' + str(ceil_lam) + 'nm.npy'
data = np.load(filename).flat[0]
S_clim = data['S_climatology']
S_RH_frac = data['RH_frac']
# 2. Create S array given the time and RH
# get height range from rh_frac
height_idx_range = rh_frac.shape[1]
# find S array
S = np.empty(rh_frac.shape)
S[:] = np.nan
for t, time_t in enumerate(time): # time
# get month idx (e.g. idx for 5th month = 4)
month_idx = time_t.month - 1
for h in range(height_idx_range): # height
# find RH idx for this month, and put the element into the S array
_, rh_idx, _ = eu.nearest(S_RH_frac, rh_frac[t, h])
S[t, h] = S_clim[month_idx, rh_idx]
return S
|
[
"def subset(self, months):\n #-- check if months is an array or a single value\n months = np.atleast_1d(months)\n #-- number of months\n n = len(months)\n #-- check that all months are available\n months_check = list(set(months) - set(self.month))\n if months_check:\n m = ','.join(['{0:03d}'.format(m) for m in months_check])\n raise IOError('GRACE/GRACE-FO months {0} not Found'.format(m))\n #-- indices to sort data objects\n months_list = [i for i,m in enumerate(self.month) if m in months]\n #-- output harmonics object\n temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))\n #-- create output harmonics\n temp.clm = np.zeros((temp.lmax+1,temp.mmax+1,n))\n temp.slm = np.zeros((temp.lmax+1,temp.mmax+1,n))\n temp.time = np.zeros((n))\n temp.month = np.zeros((n),dtype=np.int)\n temp.filename = []\n #-- for each indice\n for t,i in enumerate(months_list):\n temp.clm[:,:,t] = self.clm[:,:,i].copy()\n temp.slm[:,:,t] = self.slm[:,:,i].copy()\n temp.time[t] = self.time[i].copy()\n temp.month[t] = self.month[i].copy()\n if getattr(self, 'filename'):\n temp.filename.append(self.filename[i])\n #-- assign ndim and shape attributes\n temp.update_dimensions()\n #-- remove singleton dimensions if importing a single value\n return temp.squeeze()",
"def _init_seasons_array(self):\n\t\tx = [self.X[i] - self.L[0] for i in range(self.q)]\n\t\tsecond_season_level = np.mean(self.X[self.q:self.q*2], axis=0)\n\t\ty = [self.X[self.q + i] - second_season_level for i in range(self.q)]\n\n\t\tself.S = []\n\t\tfor i in range(len(x)):\n\t\t\tself.S.append((x[i] + y[i]) / 2)\n\t\tself.S = [self.X[i] - self.L[0] for i in range(self.q)]",
"def calc_r_md_species(r_d_microns, met, aer_i):\n\n\n # calulate r_md based on Fitzgerald (1975) eqn 8 - 10\n def calc_r_md_t(r_d_microns, rh_i, alpha_factor):\n\n \"\"\"\n Calculate r_md for a single value of rh (rh_i) at a time t (alpha and beta will be applied to all rbins)\n :param rh_i:\n :param r_d_microns: NOt the duplicated array!\n :return: r_md_i\n\n\n The r_md calculated here will be for a fixed RH, therefore the single row of r_d_microns will be fine, as it\n will compute a single set of r_md as a result.\n \"\"\"\n\n beta = np.exp((0.00077 * rh_i) / (1.009 - rh_i))\n if rh_i < 0.97:\n phi = 1.058 - ((0.0155 * (rh_i - 0.97))\n / (1.02 - (rh_i ** 1.4)))\n else:\n phi = 1.058\n alpha = 1.2 * np.exp((0.066 * rh_i) / (phi - rh_i))\n\n # alpha factor comes from the Table 1 in Fitzgerald (1975) to be used with some other aerosol types\n r_md_t = alpha_factor * alpha * (r_d_microns ** beta)\n\n return r_md_t\n\n\n\n # duplicate the range of radii to multiple rows, one for each RH - shape(time, rbin).\n # Remember: the number in each diameter bin might change, but the bin diameters themselves will not.\n # Therefore this approach works for constant and time varying number distirbutions.\n r_d_microns_dup = np.tile(r_d_microns, (len(met['time']), 1))\n\n # Set up array for aerosol\n r_md = np.empty(len(met['time']))\n r_md[:] = np.nan\n\n phi = np.empty(len(met['time']))\n phi[:] = np.nan\n\n # limits for what approach to use, depending on the RH\n # from the CLASSIC guidence, follows Fitzgerald (1975)\n if aer_i == '(NH4)2SO4':\n rh_cap = 0.995 # calculate r_md specifically for the upper limit (considered max rh)\n rh_del = 0.81 # calculate r_md specifically for the upper limit (start of empirical formula)\n # CLASSIC does linear interpolation bettween rh_del and rh_eff.\n rh_eff = 0.3 # efflorescence (below is dry)\n alpha_factor = 1.0 # a coefficient for alpha, which is specific for different aerosol types\n elif aer_i == 'NH4NO3':\n rh_cap = 0.995\n rh_del = 0.61\n rh_eff = 0.3\n alpha_factor = 1.06\n\n elif aer_i == 'NaCl':\n rh_cap = 0.995\n rh_del = 0.75\n rh_eff = 0.42\n alpha_factor = 1.35\n\n # --------------------------------------------\n # Calculate r_md for the species, given RH\n # -----------------------------------------------\n\n # empirical relationships fitted for radius in micrometers, not meters (according to CLASSIC guidance).\n\n # --- delequescence - rh cap (defined as 0.995. Above this empirical relationship breaks down) --- #\n\n # Currently just calculates it for all, then gets overwritten lower down, depending on their RH (e.g. below eff)\n # ToDo use the rh_bet_del_cap to only calc for those within the del - cap range.\n\n # # between deliquescence and rh_cap (set at 0.995 for all)\n # bool = np.logical_and(WXT['RH_frac'] >= rh_del, WXT['RH_frac'] <= rh_cap)\n # rh_bet_del_cap = np.where(bool == True)[0]\n\n beta = np.exp((0.00077 * met['RH_frac'])/(1.009 - met['RH_frac']))\n rh_lt_97 = met['RH_frac'] < 0.97\n phi[rh_lt_97] = 1.058\n phi[~rh_lt_97] = 1.058 - ((0.0155 * (met['RH_frac'][~rh_lt_97] - 0.97))\n /(1.02 - (met['RH_frac'][~rh_lt_97] ** 1.4)))\n alpha = 1.2 * np.exp((0.066 * met['RH_frac'])/ (phi - met['RH_frac']))\n\n # duplicate values across to all radii bins to help r_md = .. 
calculation: alpha_dup.shape = (time, rbin)\n alpha_dup = np.tile(alpha, (len(r_d_microns), 1)).transpose()\n beta_dup = np.tile(beta, (len(r_d_microns), 1)).transpose()\n\n r_md = alpha_factor * alpha_dup * (r_d_microns_dup ** beta_dup)\n\n # --- above rh_cap ------#\n\n # set all r_md(RH>99.5%) to r_md(RH=99.5%) to prevent growth rates inconsistent with impirical equation.\n # replace all r_md values above 0.995 with 0.995\n rh_gt_cap = met['RH_frac'] > rh_cap\n r_md[rh_gt_cap, :] = calc_r_md_t(r_d_microns, rh_cap, alpha_factor)\n\n # --- 0 to efflorescence --- #\n\n # below efflorescence point (0.3 for sulhate, r_md = r_d)\n rh_lt_eff = met['RH_frac'] <= rh_eff\n r_md[rh_lt_eff, :] = r_d_microns\n\n # ------ efflorescence to deliquescence ----------#\n\n # calculate r_md for the deliquescence rh - used in linear interpolation\n r_md_del = calc_r_md_t(r_d_microns, rh_del, alpha_factor)\n\n # all values that need to have some linear interpolation\n bool = np.logical_and(met['RH_frac'] >= rh_eff, met['RH_frac'] <= rh_del)\n rh_bet_eff_del = np.where(bool == True)[0]\n\n # between efflorescence point and deliquescence point, r_md is expected to value linearly between the two\n low_rh = rh_eff\n up_rh = rh_del\n low_r_md = r_d_microns\n up_r_md = r_md_del\n\n diff_rh = up_rh - low_rh\n diff_r_md = r_md_del - r_d_microns\n abs_diff_r_md = abs(diff_r_md)\n\n # find distance rh is along linear interpolation [fraction] from lower limit\n # frac = np.empty(len(r_md))\n # frac[:] = np.nan\n frac = ((met['RH_frac'][rh_bet_eff_del] - low_rh) / diff_rh)\n\n # duplicate abs_diff_r_md by the number of instances needing to be interpolated - helps the calculation below\n # of r_md = ...low + (frac * abs diff)\n abs_diff_r_md_dup = np.tile(abs_diff_r_md, (len(rh_bet_eff_del), 1))\n frac_dup = np.tile(frac, (len(r_d_microns), 1)).transpose()\n\n # calculate interpolated values for r_md\n r_md[rh_bet_eff_del, :] = low_r_md + (frac_dup * abs_diff_r_md_dup)\n\n return r_md",
"def read_f_RH(mod_time, ceil_lam):\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])\n return f_RH",
"def subset(self, months):\n #-- check if months is an array or a single value\n months = np.atleast_1d(months)\n #-- number of months\n n = len(months)\n #-- check that all months are available\n months_check = list(set(months) - set(self.month))\n if months_check:\n m = ','.join(['{0:03d}'.format(m) for m in months_check])\n raise IOError('GRACE/GRACE-FO months {0} not Found'.format(m))\n #-- indices to sort data objects\n months_list = [i for i,m in enumerate(self.month) if m in months]\n #-- output spatial object\n temp = spatial(nlon=self.shape[0],nlat=self.shape[1],\n fill_value=self.fill_value)\n #-- create output spatial object\n temp.data = np.zeros((temp.shape[0],temp.shape[1],n))\n temp.mask = np.zeros((temp.shape[0],temp.shape[1],n))\n #-- create output spatial error\n try:\n getattr(self, 'error')\n temp.error = np.zeros((temp.shape[0],temp.shape[1],n))\n except AttributeError:\n pass\n #-- copy dimensions\n temp.lon = self.lon.copy()\n temp.lat = self.lat.copy()\n temp.time = np.zeros((n))\n temp.month = np.zeros((n),dtype=np.int)\n temp.filename = []\n #-- for each indice\n for t,i in enumerate(months_list):\n temp.data[:,:,t] = self.data[:,:,i].copy()\n temp.mask[:,:,t] = self.mask[:,:,i].copy()\n try:\n temp.error[:,:,t] = self.error[:,:,i].copy()\n except AttributeError:\n pass\n #-- copy time dimensions\n temp.time[t] = self.time[i].copy()\n temp.month[t] = self.month[i].copy()\n #-- subset filenmaes\n if getattr(self, 'filename'):\n temp.filename.append(self.filename[i])\n #-- remove singleton dimensions if importing a single value\n return temp.squeeze()",
"def mean12h_values(hourly_temp, hourly_rh):\n\t### KAN BRUKE NP.MEAN!!\n\tmean_RH = [(sum(hourly_rh[a:a+12])/12) for a in range(0, len(hourly_rh)+1, 12)]\n\tdel mean_RH[len(mean_RH) - 1]\n\n\tmean_TS = [(sum(hourly_temp[a:a+12])/12) for a in range(0, len(hourly_temp)+1, 12)]\n\tdel mean_TS[len(mean_TS) - 1]\n\n\treturn np.array(zip(mean_TS, mean_RH))",
"def months_slice(year,hemisphere):\n if hemisphere =='North':\n return slice(str(year-1)+'-10',str(year)+'-09')\n elif hemisphere =='South':\n return slice(str(year-1)+'-04',str(year)+'-03')",
"def Msh2Lab(self, Msh):\n\n # unpack the Msh-array\n M, s, h = Msh.tolist()\n\n # calculation of L, a and b\n L = M*np.cos(s)\n a = M*np.sin(s)*np.cos(h)\n b = M*np.sin(s)*np.sin(h)\n return np.array([L,a,b])",
"def get_SARAS(Dl=500, Dh=1, Dm=1):\n\tN = 10\n\tfreq_low_low = np.linspace(50, 300, N)\n\tfreq_low = np.linspace(301, 2000, N)\n\tfreq_high = np.linspace(2001, 25000, N)\n\n\t''' CONTINUUM SPECIFICATION '''\n\tSARAS_cont_low_low = -17.2708 * np.log10(freq_low_low)-192.0714\n\tSARAS_cont_low = -17.2708 * np.log10(freq_low)-192.0714\n\tSARAS_cont_high = -0.065676 * np.log10(freq_high)-248.8661\n\n\t''' SPECTRAL LINE SPECIFICATION '''\n\tSARAS_spec_low_low = SARAS_cont_low_low + 15.\n\tSARAS_spec_low = SARAS_cont_low + 15.\n\tSARAS_spec_high = SARAS_cont_high + 15.\n\n\t''' RBW's '''\n\tRBW_cont_low_low = 10.*np.log10((1./100.) * freq_low_low * 1.E6)\n\tRBW_cont_low = 10.*np.log10((1./100.) * freq_low * 1.E6)\n\tRBW_cont_high = 10.*np.log10((1./100.) * freq_high * 1.E6)\n\n\tRBW_spec_low_low = 10.*np.log10((0.001/100.) * freq_low_low * 1.E6)\n\tRBW_spec_low = 10.*np.log10((0.001/100.) * freq_low * 1.E6)\n\tRBW_spec_high = 10.*np.log10((0.001/100.) * freq_high * 1.E6)\n\n\t''' PATH LOSS '''\n\tc0 = 3E8\n\tif Dl != 0:\n\t\tD = Dl\n\t\tpathloss_low_low = 10 * np.log10(((4*np.pi*D)/(c0/(freq_low_low * 1.E6)))**2)\n\telse:\n\t\tpathloss_low_low = 0\n\n\tif Dh != 0:\n\t\tD = Dh\n\t\tpathloss_low = 10 * np.log10(((4*np.pi*D)/(c0/(freq_low * 1.E6)))**2)\n\t\tpathloss_high = 10 * np.log10(((4*np.pi*D)/(c0/(freq_high * 1.E6)))**2)\n\n\t\t''' PSD THRESHOLD LEVELS '''\n\t\tPSD_cont_thresh_low_low = SARAS_cont_low_low + pathloss_low_low\n\t\tPSD_cont_thresh_low = SARAS_cont_low + pathloss_low\n\t\tPSD_cont_thresh_high = SARAS_cont_high + pathloss_high\n\n\t\tPSD_spec_thresh_low_low = SARAS_spec_low_low + pathloss_low_low\n\t\tPSD_spec_thresh_low = SARAS_spec_low + pathloss_low\n\t\tPSD_spec_thresh_high = SARAS_spec_high + pathloss_high\n\telse:\n\t\t''' PSD THRESHOLD LEVELS '''\n\t\tPSD_cont_thresh_low_low = SARAS_cont_low_low + pathloss_low_low\n\t\tPSD_cont_thresh_low = SARAS_cont_low \n\t\tPSD_cont_thresh_high = SARAS_cont_high\n\n\t\tPSD_spec_thresh_low_low = SARAS_spec_low_low + pathloss_low_low\n\t\tPSD_spec_thresh_low = SARAS_spec_low\n\t\tPSD_spec_thresh_high = SARAS_spec_high\n\n\n\t''' E-FIELD THRESHOLD LEVELS '''\n\t''' E-field at distance Dm '''\n\n\tDm = 10.\n\tE_cont_low_low = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_low_low)/10.)*0.001) * ((1./100.) * freq_low_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_cont_low = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_low)/10.)*0.001) * ((1./100.) * freq_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_cont_high = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_high)/10.)*0.001) * ((1./100.) * freq_high * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\n\tE_spec_low_low = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_low_low)/10.)*0.001) * ((0.001/100.) * freq_low_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_spec_low = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_low)/10.)*0.001) * ((0.001/100.) * freq_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_spec_high = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_high)/10.)*0.001) * ((0.001/100.) * freq_high * 1.E6) *377.) 
/ (4*np.pi*Dm**2.)) / 1E-6)\n\n\tfreq = []\n\tfreq.extend(freq_low_low)\n\tfreq.extend(freq_low)\n\tfreq.extend(freq_high)\n\n\tE_cont_threshold = []\n\tfor a in range(0, len(E_cont_low_low)):\n\t\tE_cont_threshold.append(E_cont_low_low[a])\n\tfor a in range(0, len(E_cont_low)):\n\t\tE_cont_threshold.append(E_cont_low[a])\n\tfor a in range(0, len(E_cont_high)):\n\t\tE_cont_threshold.append(E_cont_high[a])\n\n\tE_spec_threshold = []\n\tfor a in range(0, len(E_spec_low_low)):\n\t\tE_spec_threshold.append(E_spec_low_low[a])\n\tfor a in range(0, len(E_spec_low)):\n\t\tE_spec_threshold.append(E_spec_low[a])\n\tfor a in range(0, len(E_spec_high)):\n\t\tE_spec_threshold.append(E_spec_high[a])\n\n\tP_cont_threshold = []\n\tfor a in range(0, len(RBW_cont_low_low)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_low_low[a] + RBW_cont_low_low[a])\n\tfor a in range(0, len(RBW_cont_low)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_low[a] + RBW_cont_low[a])\n\tfor a in range(0, len(RBW_cont_high)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_high[a] + RBW_cont_high[a])\n\n\tP_spec_threshold = []\n\tfor a in range(0, len(RBW_spec_low_low)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_low_low[a] + RBW_spec_low_low[a])\n\tfor a in range(0, len(RBW_spec_low)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_low[a] + RBW_spec_low[a])\n\tfor a in range(0, len(RBW_spec_high)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_high[a] + RBW_spec_high[a])\n\n\treturn freq, E_cont_threshold, E_spec_threshold, P_cont_threshold, P_spec_threshold",
"def index(self, indice, date=True):\n #-- output harmonics object\n temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))\n #-- subset output harmonics\n temp.clm = self.clm[:,:,indice].copy()\n temp.slm = self.slm[:,:,indice].copy()\n #-- subset output dates\n if date:\n temp.time = self.time[indice].copy()\n temp.month = self.month[indice].copy()\n #-- assign ndim and shape attributes\n temp.update_dimensions()\n #-- subset filenames\n if getattr(self, 'filename'):\n temp.filename = self.filename[indice]\n return temp",
"def compute_harmonics(self) :\n\n Ye = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n Yo = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n\n phi = np.zeros((self.n_dir,1))\n for i in xrange(0,self.n_dir) :\n phi[i] = np.arctan(self.omega[i,1]/self.omega[i,0])\n if self.omega[i,0] < 0. :\n phi[i] = phi[i] + np.pi\n\n for l in xrange(0,self.L_max+1) :\n for m in xrange(0,l+1) :\n P_ml = scipy.special.lpmv(m,l,self.omega[:,2])\n# Normalization of the associated Legendre polynomials\n if m == 0 :\n norm_P = P_ml\n else :\n norm_P = (-1.0)**m*np.sqrt(2*sci.factorial(l-m)/sci.factorial(l+m))\\\n *P_ml\n size = norm_P.shape\n for i in xrange(0,size[0]) :\n Ye[l,m,i] = norm_P[i]*np.cos(m*phi[i])\n Yo[l,m,i] = norm_P[i]*np.sin(m*phi[i])\n\n# Build the matrix M \n self.sphr = np.zeros((self.n_dir,self.n_mom))\n self.M = np.zeros((self.n_dir,self.n_mom))\n if self.galerkin == True :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd for L<sn of L=sn and m=0\n if l<self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd for l<=sn\n if l<=self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n else :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1",
"def read_hourly_f_RH(mod_time, ceil_lam):\n\n import sys.platform as platform\n\n # file name and path\n if platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename)\n return f_RH",
"def SeasonalComponent(t, freq, CS_coeff):\n #-------------------------------------------------------------------------------------\n k = 0; freq_ind = 0; seasonal = np.zeros(len(t))\n while freq_ind < len(freq):\n seasonal += CS_coeff[k] * np.cos(2*np.pi*freq[freq_ind]*np.array(t))\n seasonal += CS_coeff[k+1] * np.sin(2*np.pi*freq[freq_ind]*np.array(t))\n freq_ind += 1; k += 2 \n return seasonal",
"def getRSStri(t, u, model, h, K=0):\n \n # remember, unlike Fortran, indices here will start from 0. \n # So remember use begin_idx as one less than what we were using in Fortran.\n # Basically, recresid will get filled from idx=ncols to idx=Sfinal for linear regression.\n # Fortran wud have filled it from idx = ncols+1 to Sfinal.\n\n # build RSS matrix\n if (model == 'linear'):\n ncols = 2\n elif (model == 'harmonic'):\n ncols = 2*K+1\n\n Sfinal = len(t)\n RSStri = [[0 for i in range(Sfinal)] for j in range(Sfinal)]\n brkpt_spacing = int(np.floor(Sfinal * h))\n if brkpt_spacing <= ncols:\n print (\"minimum segment size must be greater than the number of regressors; resetting\")\n brkpt_spacing = ncols + 2 #this number 2 is a random choice\n \n for idx in range(Sfinal- brkpt_spacing +1):\n if (model == 'linear'):\n tmp = recresids(t[idx:], u[idx:], ncols, 'linear', 1)\n elif (model == 'harmonic'):\n tmp = recresids(t[idx:], u[idx:], ncols, 'harmon', K) \n else:\n print (\"model not supported\")\n tmp2 = [i*i for i in tmp]\n RSStri[idx][idx:] = np.cumsum(tmp2)\n \n return RSStri",
"def get_startdat_section_data(date):\n return np.array([date.day, date.month, date.year, 0, 0, 0])",
"def read_and_process_cabauw_data(years = [], months = []):\n if len(months) == 0: months = [s.month]\n else: months = [j if isinstance(j, str) else format(j, '02d') for j in months]\n if len(years) == 0: years = [s.year for j in months]\n else: years = [str(j) for j in years]\n\n data = Cabauw_Data()\n n_days = 0\n for i in range(len(months)):\n print('read_cabauw_data', years[i], months[i])\n f = xr.open_dataset(s.data_path+'cesar_tower_meteo_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n time = np.array(f.variables['time']) #Is given in hours, with data every 10 minutes\n z = np.array(f.variables['z'])\n speed = np.array(f.variables['F'])\n direction = np.array(f.variables['D'])\n T = np.array(f.variables['TA'])\n Td = np.array(f.variables['TD'])\n \n n_time = len(time); n_z = len(z)\n \n #Reshape the data by adding an extra axis that represents different days\n n_times_day = 6*24 #Data every 10 minutes\n n_days_month = int(n_time / n_times_day)\n n_days += n_days_month\n \n hours = np.reshape(np.mod(time, 24), (n_days_month, n_times_day))\n speed = np.reshape(speed, (n_days_month, n_times_day, n_z))\n direction = np.reshape(direction, speed.shape)\n #- signs, because direction gives the direction from which the wind is blowing, and not to which the wind is blowing.\n u = - speed * np.sin(direction * np.pi/180.)\n v = - speed * np.cos(direction * np.pi/180.)\n V = np.zeros(u.shape + (2,))\n V[:,:,:,0] = u; V[:,:,:,1] = v\n T = np.reshape(T, speed.shape)\n theta = T + g/Cp*z[np.newaxis, np.newaxis, :]\n Td = np.reshape(Td, speed.shape)\n \n \n #Import the second file that contains a.o. surface pressures\n f2 = xr.open_dataset(s.data_path+'cesar_surface_meteo_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n p0 = np.reshape(np.array(f2.variables['P0']), speed.shape[:2])\n \n #Import the third file that contains radiation data\n f3 = xr.open_dataset(s.data_path+'cesar_surface_radiation_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n longwave_upward = np.reshape(np.array(f3.variables['LWU']), speed.shape[:2]) \n longwave_downward = np.reshape(np.array(f3.variables['LWD']), speed.shape[:2]) \n \n variables = ['hours','speed','direction','u','v','V','T','theta','Td','z','p0','longwave_upward','longwave_downward']\n for j in variables:\n if i == 0:\n exec('data.'+j+' = '+j)\n elif j != 'z':\n exec('data.'+j+' = np.concatenate([data.'+j+','+j+'], axis = 0)')\n \n return data #Return data object with the data as attributes",
"def test_sv_short_month_multi(self):\n start = datetime.datetime(year=2019, month=8, day=1)\n end = datetime.datetime(year=2019, month=9, day=1)\n queries = [{'key': 'apple', 'geo': 'US'}, {'key': 'google', 'geo': 'US'},\n {'key': 'microsoft', 'geo': 'US'}, {'key': 'oracle', 'geo': 'US'},\n {'key': 'facebook', 'geo': 'US'}, {'key': 'uber', 'geo': 'US'}]\n series = SVSeries.multivariate(self.connection, queries, start, end,\n category=CategoryCodes.COMPUTERS_ELECTRONICS,\n granularity='MONTH')\n data = series.get_data()\n with self.subTest('result_normalized'):\n self.assertTrue(any(data.max() == 100))\n with self.subTest('result_monthly'):\n self.assertEqual(data.shape[0], math.ceil((end - start).days / 30))",
"def __init__(self, degree, zeniths, azimuths, angle_units=1):\n self.degree = degree\n self.num_params = np.sum([2*i+1 for i in range(degree+1) ])\n self.num_measurments = len( zeniths )*len( azimuths )\n\n ## sort angles\n all_azimuths = np.empty(self.num_measurments , dtype=np.double)\n all_zeniths = np.empty(self.num_measurments , dtype=np.double)\n total_i = 0\n for ze in zeniths:\n for az in azimuths:\n all_azimuths[total_i] = az*angle_units\n all_zeniths[total_i] = ze*angle_units\n total_i += 1\n\n ## make matrix\n self.matrix = np.empty( (self.num_measurments, self.num_params), dtype=complex )\n par_i = 0\n for d in range(degree+1):\n ## order zero\n sph_harm(0, d, all_azimuths, all_zeniths, out=self.matrix[:,par_i] )\n par_i += 1\n\n ## all others\n for order in range(1, d+1):\n ## positive\n sph_harm( order, d, all_azimuths, all_zeniths, out=self.matrix[:,par_i] )\n par_i += 1\n ## negative\n sph_harm(-order, d, all_azimuths, all_zeniths, out=self.matrix[:,par_i] )\n par_i += 1\n\n self.all_zeniths = all_zeniths\n self.all_azimuths = all_azimuths\n\n\n self.SA_TMP = np.empty(self.num_measurments , dtype=complex)\n\n d_az = np.average( azimuths[1:] - azimuths[:-1] )\n d_ze = np.average( zeniths[1:] - zeniths[:-1] )\n\n self.dSolidAngle = np.sin(self.all_zeniths)\n self.dSolidAngle *= d_az*d_ze",
"def process_2019_raw_into_monthly() -> List[float]:\n this_file_path = Path(os.path.realpath(__file__))\n this_file_dir = this_file_path.parent\n usage_2019_file = this_file_dir / 'resources' / 'electricity2019.json'\n with usage_2019_file.open() as f:\n usage_2019_data = json.loads(f.read())\n flat_kwh_per_day = []\n for raw_month in usage_2019_data:\n start_date = datetime.strptime(raw_month['StartDate'], '%m/%d/%Y')\n end_date = datetime.strptime(raw_month['EndDate'], '%m/%d/%Y')\n days_in_range = (end_date - start_date).days\n for i in range(days_in_range + 1):\n this_date = start_date + timedelta(i)\n if this_date.year == 2019:\n flat_kwh_per_day.append(raw_month['kWh'])\n day_index = -1\n month_usages = []\n for month in range(1, 13):\n num_days = monthrange(2019, month)[1]\n month_sum = 0\n for day in range(num_days):\n day_index += 1\n month_sum += flat_kwh_per_day[day_index]\n month_usages.append(round(month_sum / num_days, 2))\n return month_usages"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create the S array from the climatology (month, RH_fraction) given the month and RH
|
def get_S_climatology(time, rh_frac, ceil_lam):
# 1. Read in the data
filename = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/' + \
'S_climatology_NK_SMPS_APS_' + str(ceil_lam) + 'nm.npy'
data = np.load(filename).flat[0]
S_clim = data['S_climatology']
S_RH_frac = data['RH_frac']
# 2. Create S array given the time and RH
# get height range from rh_frac
height_idx_range = rh_frac.shape[1]
# find S array
S = np.empty(rh_frac.shape)
S[:] = np.nan
for t, time_t in enumerate(time): # time
# get month idx (e.g. idx for 5th month = 4)
month_idx = time_t.month - 1
for h in range(height_idx_range): # height
# find RH idx for this month, and put the element into the S array
_, rh_idx, _ = eu.nearest(S_RH_frac, rh_frac[t, h])
S[t, h] = S_clim[month_idx, rh_idx]
return S
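A minimal usage sketch (not part of the original source), assuming eu.nearest returns a (value, index, difference) tuple, that the hard-coded climatology .npy file exists with S_clim shaped (12 months, n_RH_bins), and that rh_frac is an array of RH fractions shaped (time, height); the 905 nm wavelength is illustrative only.
import datetime
import numpy as np
time = [datetime.datetime(2016, 5, 4, h) for h in range(24)]  # hypothetical hourly timesteps for one day
rh_frac = np.random.uniform(0.3, 0.99, size=(len(time), 70))  # hypothetical RH fraction field, shape (time, height)
S = get_S_climatology(time, rh_frac, ceil_lam=905)  # lidar ratio S, same shape as rh_frac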
|
[
"def subset(self, months):\n #-- check if months is an array or a single value\n months = np.atleast_1d(months)\n #-- number of months\n n = len(months)\n #-- check that all months are available\n months_check = list(set(months) - set(self.month))\n if months_check:\n m = ','.join(['{0:03d}'.format(m) for m in months_check])\n raise IOError('GRACE/GRACE-FO months {0} not Found'.format(m))\n #-- indices to sort data objects\n months_list = [i for i,m in enumerate(self.month) if m in months]\n #-- output harmonics object\n temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))\n #-- create output harmonics\n temp.clm = np.zeros((temp.lmax+1,temp.mmax+1,n))\n temp.slm = np.zeros((temp.lmax+1,temp.mmax+1,n))\n temp.time = np.zeros((n))\n temp.month = np.zeros((n),dtype=np.int)\n temp.filename = []\n #-- for each indice\n for t,i in enumerate(months_list):\n temp.clm[:,:,t] = self.clm[:,:,i].copy()\n temp.slm[:,:,t] = self.slm[:,:,i].copy()\n temp.time[t] = self.time[i].copy()\n temp.month[t] = self.month[i].copy()\n if getattr(self, 'filename'):\n temp.filename.append(self.filename[i])\n #-- assign ndim and shape attributes\n temp.update_dimensions()\n #-- remove singleton dimensions if importing a single value\n return temp.squeeze()",
"def _init_seasons_array(self):\n\t\tx = [self.X[i] - self.L[0] for i in range(self.q)]\n\t\tsecond_season_level = np.mean(self.X[self.q:self.q*2], axis=0)\n\t\ty = [self.X[self.q + i] - second_season_level for i in range(self.q)]\n\n\t\tself.S = []\n\t\tfor i in range(len(x)):\n\t\t\tself.S.append((x[i] + y[i]) / 2)\n\t\tself.S = [self.X[i] - self.L[0] for i in range(self.q)]",
"def calc_r_md_species(r_d_microns, met, aer_i):\n\n\n # calulate r_md based on Fitzgerald (1975) eqn 8 - 10\n def calc_r_md_t(r_d_microns, rh_i, alpha_factor):\n\n \"\"\"\n Calculate r_md for a single value of rh (rh_i) at a time t (alpha and beta will be applied to all rbins)\n :param rh_i:\n :param r_d_microns: NOt the duplicated array!\n :return: r_md_i\n\n\n The r_md calculated here will be for a fixed RH, therefore the single row of r_d_microns will be fine, as it\n will compute a single set of r_md as a result.\n \"\"\"\n\n beta = np.exp((0.00077 * rh_i) / (1.009 - rh_i))\n if rh_i < 0.97:\n phi = 1.058 - ((0.0155 * (rh_i - 0.97))\n / (1.02 - (rh_i ** 1.4)))\n else:\n phi = 1.058\n alpha = 1.2 * np.exp((0.066 * rh_i) / (phi - rh_i))\n\n # alpha factor comes from the Table 1 in Fitzgerald (1975) to be used with some other aerosol types\n r_md_t = alpha_factor * alpha * (r_d_microns ** beta)\n\n return r_md_t\n\n\n\n # duplicate the range of radii to multiple rows, one for each RH - shape(time, rbin).\n # Remember: the number in each diameter bin might change, but the bin diameters themselves will not.\n # Therefore this approach works for constant and time varying number distirbutions.\n r_d_microns_dup = np.tile(r_d_microns, (len(met['time']), 1))\n\n # Set up array for aerosol\n r_md = np.empty(len(met['time']))\n r_md[:] = np.nan\n\n phi = np.empty(len(met['time']))\n phi[:] = np.nan\n\n # limits for what approach to use, depending on the RH\n # from the CLASSIC guidence, follows Fitzgerald (1975)\n if aer_i == '(NH4)2SO4':\n rh_cap = 0.995 # calculate r_md specifically for the upper limit (considered max rh)\n rh_del = 0.81 # calculate r_md specifically for the upper limit (start of empirical formula)\n # CLASSIC does linear interpolation bettween rh_del and rh_eff.\n rh_eff = 0.3 # efflorescence (below is dry)\n alpha_factor = 1.0 # a coefficient for alpha, which is specific for different aerosol types\n elif aer_i == 'NH4NO3':\n rh_cap = 0.995\n rh_del = 0.61\n rh_eff = 0.3\n alpha_factor = 1.06\n\n elif aer_i == 'NaCl':\n rh_cap = 0.995\n rh_del = 0.75\n rh_eff = 0.42\n alpha_factor = 1.35\n\n # --------------------------------------------\n # Calculate r_md for the species, given RH\n # -----------------------------------------------\n\n # empirical relationships fitted for radius in micrometers, not meters (according to CLASSIC guidance).\n\n # --- delequescence - rh cap (defined as 0.995. Above this empirical relationship breaks down) --- #\n\n # Currently just calculates it for all, then gets overwritten lower down, depending on their RH (e.g. below eff)\n # ToDo use the rh_bet_del_cap to only calc for those within the del - cap range.\n\n # # between deliquescence and rh_cap (set at 0.995 for all)\n # bool = np.logical_and(WXT['RH_frac'] >= rh_del, WXT['RH_frac'] <= rh_cap)\n # rh_bet_del_cap = np.where(bool == True)[0]\n\n beta = np.exp((0.00077 * met['RH_frac'])/(1.009 - met['RH_frac']))\n rh_lt_97 = met['RH_frac'] < 0.97\n phi[rh_lt_97] = 1.058\n phi[~rh_lt_97] = 1.058 - ((0.0155 * (met['RH_frac'][~rh_lt_97] - 0.97))\n /(1.02 - (met['RH_frac'][~rh_lt_97] ** 1.4)))\n alpha = 1.2 * np.exp((0.066 * met['RH_frac'])/ (phi - met['RH_frac']))\n\n # duplicate values across to all radii bins to help r_md = .. 
calculation: alpha_dup.shape = (time, rbin)\n alpha_dup = np.tile(alpha, (len(r_d_microns), 1)).transpose()\n beta_dup = np.tile(beta, (len(r_d_microns), 1)).transpose()\n\n r_md = alpha_factor * alpha_dup * (r_d_microns_dup ** beta_dup)\n\n # --- above rh_cap ------#\n\n # set all r_md(RH>99.5%) to r_md(RH=99.5%) to prevent growth rates inconsistent with impirical equation.\n # replace all r_md values above 0.995 with 0.995\n rh_gt_cap = met['RH_frac'] > rh_cap\n r_md[rh_gt_cap, :] = calc_r_md_t(r_d_microns, rh_cap, alpha_factor)\n\n # --- 0 to efflorescence --- #\n\n # below efflorescence point (0.3 for sulhate, r_md = r_d)\n rh_lt_eff = met['RH_frac'] <= rh_eff\n r_md[rh_lt_eff, :] = r_d_microns\n\n # ------ efflorescence to deliquescence ----------#\n\n # calculate r_md for the deliquescence rh - used in linear interpolation\n r_md_del = calc_r_md_t(r_d_microns, rh_del, alpha_factor)\n\n # all values that need to have some linear interpolation\n bool = np.logical_and(met['RH_frac'] >= rh_eff, met['RH_frac'] <= rh_del)\n rh_bet_eff_del = np.where(bool == True)[0]\n\n # between efflorescence point and deliquescence point, r_md is expected to value linearly between the two\n low_rh = rh_eff\n up_rh = rh_del\n low_r_md = r_d_microns\n up_r_md = r_md_del\n\n diff_rh = up_rh - low_rh\n diff_r_md = r_md_del - r_d_microns\n abs_diff_r_md = abs(diff_r_md)\n\n # find distance rh is along linear interpolation [fraction] from lower limit\n # frac = np.empty(len(r_md))\n # frac[:] = np.nan\n frac = ((met['RH_frac'][rh_bet_eff_del] - low_rh) / diff_rh)\n\n # duplicate abs_diff_r_md by the number of instances needing to be interpolated - helps the calculation below\n # of r_md = ...low + (frac * abs diff)\n abs_diff_r_md_dup = np.tile(abs_diff_r_md, (len(rh_bet_eff_del), 1))\n frac_dup = np.tile(frac, (len(r_d_microns), 1)).transpose()\n\n # calculate interpolated values for r_md\n r_md[rh_bet_eff_del, :] = low_r_md + (frac_dup * abs_diff_r_md_dup)\n\n return r_md",
"def read_f_RH(mod_time, ceil_lam):\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])\n return f_RH",
"def subset(self, months):\n #-- check if months is an array or a single value\n months = np.atleast_1d(months)\n #-- number of months\n n = len(months)\n #-- check that all months are available\n months_check = list(set(months) - set(self.month))\n if months_check:\n m = ','.join(['{0:03d}'.format(m) for m in months_check])\n raise IOError('GRACE/GRACE-FO months {0} not Found'.format(m))\n #-- indices to sort data objects\n months_list = [i for i,m in enumerate(self.month) if m in months]\n #-- output spatial object\n temp = spatial(nlon=self.shape[0],nlat=self.shape[1],\n fill_value=self.fill_value)\n #-- create output spatial object\n temp.data = np.zeros((temp.shape[0],temp.shape[1],n))\n temp.mask = np.zeros((temp.shape[0],temp.shape[1],n))\n #-- create output spatial error\n try:\n getattr(self, 'error')\n temp.error = np.zeros((temp.shape[0],temp.shape[1],n))\n except AttributeError:\n pass\n #-- copy dimensions\n temp.lon = self.lon.copy()\n temp.lat = self.lat.copy()\n temp.time = np.zeros((n))\n temp.month = np.zeros((n),dtype=np.int)\n temp.filename = []\n #-- for each indice\n for t,i in enumerate(months_list):\n temp.data[:,:,t] = self.data[:,:,i].copy()\n temp.mask[:,:,t] = self.mask[:,:,i].copy()\n try:\n temp.error[:,:,t] = self.error[:,:,i].copy()\n except AttributeError:\n pass\n #-- copy time dimensions\n temp.time[t] = self.time[i].copy()\n temp.month[t] = self.month[i].copy()\n #-- subset filenmaes\n if getattr(self, 'filename'):\n temp.filename.append(self.filename[i])\n #-- remove singleton dimensions if importing a single value\n return temp.squeeze()",
"def mean12h_values(hourly_temp, hourly_rh):\n\t### KAN BRUKE NP.MEAN!!\n\tmean_RH = [(sum(hourly_rh[a:a+12])/12) for a in range(0, len(hourly_rh)+1, 12)]\n\tdel mean_RH[len(mean_RH) - 1]\n\n\tmean_TS = [(sum(hourly_temp[a:a+12])/12) for a in range(0, len(hourly_temp)+1, 12)]\n\tdel mean_TS[len(mean_TS) - 1]\n\n\treturn np.array(zip(mean_TS, mean_RH))",
"def months_slice(year,hemisphere):\n if hemisphere =='North':\n return slice(str(year-1)+'-10',str(year)+'-09')\n elif hemisphere =='South':\n return slice(str(year-1)+'-04',str(year)+'-03')",
"def Msh2Lab(self, Msh):\n\n # unpack the Msh-array\n M, s, h = Msh.tolist()\n\n # calculation of L, a and b\n L = M*np.cos(s)\n a = M*np.sin(s)*np.cos(h)\n b = M*np.sin(s)*np.sin(h)\n return np.array([L,a,b])",
"def get_SARAS(Dl=500, Dh=1, Dm=1):\n\tN = 10\n\tfreq_low_low = np.linspace(50, 300, N)\n\tfreq_low = np.linspace(301, 2000, N)\n\tfreq_high = np.linspace(2001, 25000, N)\n\n\t''' CONTINUUM SPECIFICATION '''\n\tSARAS_cont_low_low = -17.2708 * np.log10(freq_low_low)-192.0714\n\tSARAS_cont_low = -17.2708 * np.log10(freq_low)-192.0714\n\tSARAS_cont_high = -0.065676 * np.log10(freq_high)-248.8661\n\n\t''' SPECTRAL LINE SPECIFICATION '''\n\tSARAS_spec_low_low = SARAS_cont_low_low + 15.\n\tSARAS_spec_low = SARAS_cont_low + 15.\n\tSARAS_spec_high = SARAS_cont_high + 15.\n\n\t''' RBW's '''\n\tRBW_cont_low_low = 10.*np.log10((1./100.) * freq_low_low * 1.E6)\n\tRBW_cont_low = 10.*np.log10((1./100.) * freq_low * 1.E6)\n\tRBW_cont_high = 10.*np.log10((1./100.) * freq_high * 1.E6)\n\n\tRBW_spec_low_low = 10.*np.log10((0.001/100.) * freq_low_low * 1.E6)\n\tRBW_spec_low = 10.*np.log10((0.001/100.) * freq_low * 1.E6)\n\tRBW_spec_high = 10.*np.log10((0.001/100.) * freq_high * 1.E6)\n\n\t''' PATH LOSS '''\n\tc0 = 3E8\n\tif Dl != 0:\n\t\tD = Dl\n\t\tpathloss_low_low = 10 * np.log10(((4*np.pi*D)/(c0/(freq_low_low * 1.E6)))**2)\n\telse:\n\t\tpathloss_low_low = 0\n\n\tif Dh != 0:\n\t\tD = Dh\n\t\tpathloss_low = 10 * np.log10(((4*np.pi*D)/(c0/(freq_low * 1.E6)))**2)\n\t\tpathloss_high = 10 * np.log10(((4*np.pi*D)/(c0/(freq_high * 1.E6)))**2)\n\n\t\t''' PSD THRESHOLD LEVELS '''\n\t\tPSD_cont_thresh_low_low = SARAS_cont_low_low + pathloss_low_low\n\t\tPSD_cont_thresh_low = SARAS_cont_low + pathloss_low\n\t\tPSD_cont_thresh_high = SARAS_cont_high + pathloss_high\n\n\t\tPSD_spec_thresh_low_low = SARAS_spec_low_low + pathloss_low_low\n\t\tPSD_spec_thresh_low = SARAS_spec_low + pathloss_low\n\t\tPSD_spec_thresh_high = SARAS_spec_high + pathloss_high\n\telse:\n\t\t''' PSD THRESHOLD LEVELS '''\n\t\tPSD_cont_thresh_low_low = SARAS_cont_low_low + pathloss_low_low\n\t\tPSD_cont_thresh_low = SARAS_cont_low \n\t\tPSD_cont_thresh_high = SARAS_cont_high\n\n\t\tPSD_spec_thresh_low_low = SARAS_spec_low_low + pathloss_low_low\n\t\tPSD_spec_thresh_low = SARAS_spec_low\n\t\tPSD_spec_thresh_high = SARAS_spec_high\n\n\n\t''' E-FIELD THRESHOLD LEVELS '''\n\t''' E-field at distance Dm '''\n\n\tDm = 10.\n\tE_cont_low_low = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_low_low)/10.)*0.001) * ((1./100.) * freq_low_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_cont_low = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_low)/10.)*0.001) * ((1./100.) * freq_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_cont_high = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_high)/10.)*0.001) * ((1./100.) * freq_high * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\n\tE_spec_low_low = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_low_low)/10.)*0.001) * ((0.001/100.) * freq_low_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_spec_low = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_low)/10.)*0.001) * ((0.001/100.) * freq_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_spec_high = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_high)/10.)*0.001) * ((0.001/100.) * freq_high * 1.E6) *377.) 
/ (4*np.pi*Dm**2.)) / 1E-6)\n\n\tfreq = []\n\tfreq.extend(freq_low_low)\n\tfreq.extend(freq_low)\n\tfreq.extend(freq_high)\n\n\tE_cont_threshold = []\n\tfor a in range(0, len(E_cont_low_low)):\n\t\tE_cont_threshold.append(E_cont_low_low[a])\n\tfor a in range(0, len(E_cont_low)):\n\t\tE_cont_threshold.append(E_cont_low[a])\n\tfor a in range(0, len(E_cont_high)):\n\t\tE_cont_threshold.append(E_cont_high[a])\n\n\tE_spec_threshold = []\n\tfor a in range(0, len(E_spec_low_low)):\n\t\tE_spec_threshold.append(E_spec_low_low[a])\n\tfor a in range(0, len(E_spec_low)):\n\t\tE_spec_threshold.append(E_spec_low[a])\n\tfor a in range(0, len(E_spec_high)):\n\t\tE_spec_threshold.append(E_spec_high[a])\n\n\tP_cont_threshold = []\n\tfor a in range(0, len(RBW_cont_low_low)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_low_low[a] + RBW_cont_low_low[a])\n\tfor a in range(0, len(RBW_cont_low)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_low[a] + RBW_cont_low[a])\n\tfor a in range(0, len(RBW_cont_high)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_high[a] + RBW_cont_high[a])\n\n\tP_spec_threshold = []\n\tfor a in range(0, len(RBW_spec_low_low)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_low_low[a] + RBW_spec_low_low[a])\n\tfor a in range(0, len(RBW_spec_low)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_low[a] + RBW_spec_low[a])\n\tfor a in range(0, len(RBW_spec_high)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_high[a] + RBW_spec_high[a])\n\n\treturn freq, E_cont_threshold, E_spec_threshold, P_cont_threshold, P_spec_threshold",
"def index(self, indice, date=True):\n #-- output harmonics object\n temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))\n #-- subset output harmonics\n temp.clm = self.clm[:,:,indice].copy()\n temp.slm = self.slm[:,:,indice].copy()\n #-- subset output dates\n if date:\n temp.time = self.time[indice].copy()\n temp.month = self.month[indice].copy()\n #-- assign ndim and shape attributes\n temp.update_dimensions()\n #-- subset filenames\n if getattr(self, 'filename'):\n temp.filename = self.filename[indice]\n return temp",
"def compute_harmonics(self) :\n\n Ye = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n Yo = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n\n phi = np.zeros((self.n_dir,1))\n for i in xrange(0,self.n_dir) :\n phi[i] = np.arctan(self.omega[i,1]/self.omega[i,0])\n if self.omega[i,0] < 0. :\n phi[i] = phi[i] + np.pi\n\n for l in xrange(0,self.L_max+1) :\n for m in xrange(0,l+1) :\n P_ml = scipy.special.lpmv(m,l,self.omega[:,2])\n# Normalization of the associated Legendre polynomials\n if m == 0 :\n norm_P = P_ml\n else :\n norm_P = (-1.0)**m*np.sqrt(2*sci.factorial(l-m)/sci.factorial(l+m))\\\n *P_ml\n size = norm_P.shape\n for i in xrange(0,size[0]) :\n Ye[l,m,i] = norm_P[i]*np.cos(m*phi[i])\n Yo[l,m,i] = norm_P[i]*np.sin(m*phi[i])\n\n# Build the matrix M \n self.sphr = np.zeros((self.n_dir,self.n_mom))\n self.M = np.zeros((self.n_dir,self.n_mom))\n if self.galerkin == True :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd for L<sn of L=sn and m=0\n if l<self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd for l<=sn\n if l<=self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n else :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1",
"def read_hourly_f_RH(mod_time, ceil_lam):\n\n import sys.platform as platform\n\n # file name and path\n if platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename)\n return f_RH",
"def SeasonalComponent(t, freq, CS_coeff):\n #-------------------------------------------------------------------------------------\n k = 0; freq_ind = 0; seasonal = np.zeros(len(t))\n while freq_ind < len(freq):\n seasonal += CS_coeff[k] * np.cos(2*np.pi*freq[freq_ind]*np.array(t))\n seasonal += CS_coeff[k+1] * np.sin(2*np.pi*freq[freq_ind]*np.array(t))\n freq_ind += 1; k += 2 \n return seasonal",
"def getRSStri(t, u, model, h, K=0):\n \n # remember, unlike Fortran, indices here will start from 0. \n # So remember use begin_idx as one less than what we were using in Fortran.\n # Basically, recresid will get filled from idx=ncols to idx=Sfinal for linear regression.\n # Fortran wud have filled it from idx = ncols+1 to Sfinal.\n\n # build RSS matrix\n if (model == 'linear'):\n ncols = 2\n elif (model == 'harmonic'):\n ncols = 2*K+1\n\n Sfinal = len(t)\n RSStri = [[0 for i in range(Sfinal)] for j in range(Sfinal)]\n brkpt_spacing = int(np.floor(Sfinal * h))\n if brkpt_spacing <= ncols:\n print (\"minimum segment size must be greater than the number of regressors; resetting\")\n brkpt_spacing = ncols + 2 #this number 2 is a random choice\n \n for idx in range(Sfinal- brkpt_spacing +1):\n if (model == 'linear'):\n tmp = recresids(t[idx:], u[idx:], ncols, 'linear', 1)\n elif (model == 'harmonic'):\n tmp = recresids(t[idx:], u[idx:], ncols, 'harmon', K) \n else:\n print (\"model not supported\")\n tmp2 = [i*i for i in tmp]\n RSStri[idx][idx:] = np.cumsum(tmp2)\n \n return RSStri",
"def get_startdat_section_data(date):\n return np.array([date.day, date.month, date.year, 0, 0, 0])",
"def read_and_process_cabauw_data(years = [], months = []):\n if len(months) == 0: months = [s.month]\n else: months = [j if isinstance(j, str) else format(j, '02d') for j in months]\n if len(years) == 0: years = [s.year for j in months]\n else: years = [str(j) for j in years]\n\n data = Cabauw_Data()\n n_days = 0\n for i in range(len(months)):\n print('read_cabauw_data', years[i], months[i])\n f = xr.open_dataset(s.data_path+'cesar_tower_meteo_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n time = np.array(f.variables['time']) #Is given in hours, with data every 10 minutes\n z = np.array(f.variables['z'])\n speed = np.array(f.variables['F'])\n direction = np.array(f.variables['D'])\n T = np.array(f.variables['TA'])\n Td = np.array(f.variables['TD'])\n \n n_time = len(time); n_z = len(z)\n \n #Reshape the data by adding an extra axis that represents different days\n n_times_day = 6*24 #Data every 10 minutes\n n_days_month = int(n_time / n_times_day)\n n_days += n_days_month\n \n hours = np.reshape(np.mod(time, 24), (n_days_month, n_times_day))\n speed = np.reshape(speed, (n_days_month, n_times_day, n_z))\n direction = np.reshape(direction, speed.shape)\n #- signs, because direction gives the direction from which the wind is blowing, and not to which the wind is blowing.\n u = - speed * np.sin(direction * np.pi/180.)\n v = - speed * np.cos(direction * np.pi/180.)\n V = np.zeros(u.shape + (2,))\n V[:,:,:,0] = u; V[:,:,:,1] = v\n T = np.reshape(T, speed.shape)\n theta = T + g/Cp*z[np.newaxis, np.newaxis, :]\n Td = np.reshape(Td, speed.shape)\n \n \n #Import the second file that contains a.o. surface pressures\n f2 = xr.open_dataset(s.data_path+'cesar_surface_meteo_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n p0 = np.reshape(np.array(f2.variables['P0']), speed.shape[:2])\n \n #Import the third file that contains radiation data\n f3 = xr.open_dataset(s.data_path+'cesar_surface_radiation_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n longwave_upward = np.reshape(np.array(f3.variables['LWU']), speed.shape[:2]) \n longwave_downward = np.reshape(np.array(f3.variables['LWD']), speed.shape[:2]) \n \n variables = ['hours','speed','direction','u','v','V','T','theta','Td','z','p0','longwave_upward','longwave_downward']\n for j in variables:\n if i == 0:\n exec('data.'+j+' = '+j)\n elif j != 'z':\n exec('data.'+j+' = np.concatenate([data.'+j+','+j+'], axis = 0)')\n \n return data #Return data object with the data as attributes",
"def test_sv_short_month_multi(self):\n start = datetime.datetime(year=2019, month=8, day=1)\n end = datetime.datetime(year=2019, month=9, day=1)\n queries = [{'key': 'apple', 'geo': 'US'}, {'key': 'google', 'geo': 'US'},\n {'key': 'microsoft', 'geo': 'US'}, {'key': 'oracle', 'geo': 'US'},\n {'key': 'facebook', 'geo': 'US'}, {'key': 'uber', 'geo': 'US'}]\n series = SVSeries.multivariate(self.connection, queries, start, end,\n category=CategoryCodes.COMPUTERS_ELECTRONICS,\n granularity='MONTH')\n data = series.get_data()\n with self.subTest('result_normalized'):\n self.assertTrue(any(data.max() == 100))\n with self.subTest('result_monthly'):\n self.assertEqual(data.shape[0], math.ceil((end - start).days / 30))",
"def __init__(self, degree, zeniths, azimuths, angle_units=1):\n self.degree = degree\n self.num_params = np.sum([2*i+1 for i in range(degree+1) ])\n self.num_measurments = len( zeniths )*len( azimuths )\n\n ## sort angles\n all_azimuths = np.empty(self.num_measurments , dtype=np.double)\n all_zeniths = np.empty(self.num_measurments , dtype=np.double)\n total_i = 0\n for ze in zeniths:\n for az in azimuths:\n all_azimuths[total_i] = az*angle_units\n all_zeniths[total_i] = ze*angle_units\n total_i += 1\n\n ## make matrix\n self.matrix = np.empty( (self.num_measurments, self.num_params), dtype=complex )\n par_i = 0\n for d in range(degree+1):\n ## order zero\n sph_harm(0, d, all_azimuths, all_zeniths, out=self.matrix[:,par_i] )\n par_i += 1\n\n ## all others\n for order in range(1, d+1):\n ## positive\n sph_harm( order, d, all_azimuths, all_zeniths, out=self.matrix[:,par_i] )\n par_i += 1\n ## negative\n sph_harm(-order, d, all_azimuths, all_zeniths, out=self.matrix[:,par_i] )\n par_i += 1\n\n self.all_zeniths = all_zeniths\n self.all_azimuths = all_azimuths\n\n\n self.SA_TMP = np.empty(self.num_measurments , dtype=complex)\n\n d_az = np.average( azimuths[1:] - azimuths[:-1] )\n d_ze = np.average( zeniths[1:] - zeniths[:-1] )\n\n self.dSolidAngle = np.sin(self.all_zeniths)\n self.dSolidAngle *= d_az*d_ze",
"def process_2019_raw_into_monthly() -> List[float]:\n this_file_path = Path(os.path.realpath(__file__))\n this_file_dir = this_file_path.parent\n usage_2019_file = this_file_dir / 'resources' / 'electricity2019.json'\n with usage_2019_file.open() as f:\n usage_2019_data = json.loads(f.read())\n flat_kwh_per_day = []\n for raw_month in usage_2019_data:\n start_date = datetime.strptime(raw_month['StartDate'], '%m/%d/%Y')\n end_date = datetime.strptime(raw_month['EndDate'], '%m/%d/%Y')\n days_in_range = (end_date - start_date).days\n for i in range(days_in_range + 1):\n this_date = start_date + timedelta(i)\n if this_date.year == 2019:\n flat_kwh_per_day.append(raw_month['kWh'])\n day_index = -1\n month_usages = []\n for month in range(1, 13):\n num_days = monthrange(2019, month)[1]\n month_sum = 0\n for day in range(num_days):\n day_index += 1\n month_sum += flat_kwh_per_day[day_index]\n month_usages.append(round(month_sum / num_days, 2))\n return month_usages"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read in the hourly f_RH data from a netCDF file for all aerosols. EW 21/02/17
|
def read_hourly_f_RH(mod_time, ceil_lam):
from sys import platform  # sys.platform is an attribute of the sys module, not a submodule, so it cannot be imported directly
# file name and path
if platform == 'linux2':
miedir = '/data/jcmm1/ewarren/Mie/'
else:
miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'
filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'
# read data
# f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])
f_RH = netCDF_read(miedir + filename)
return f_RH
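For orientation (an assumption based on how the lookup table is indexed downstream, not something stated here): the returned dictionary is expected to hold 'f(RH) MURK' with shape (month, radii, RH) alongside the 'RH' and 'radii_range' coordinate arrays. A minimal, hedged calling sketch; mod_time and the 905 nm wavelength are illustrative and the monthly_f(RH)_NK_905nm.nc file must exist at the chosen path.
import datetime
mod_time = [datetime.datetime(2016, 5, 4, h) for h in range(24)]  # hypothetical timesteps (not used inside the reader itself)
f_RH = read_hourly_f_RH(mod_time, ceil_lam=905)
print(sorted(f_RH.keys()))  # expect keys such as 'RH', 'f(RH) MURK', 'radii_range'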
|
[
"def read_f_RH(mod_time, ceil_lam):\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])\n return f_RH",
"def read_wxt_obs(day, time, z):\n\n filepath = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \\\n 'Davis_BGH_' + day.strftime('%Y') + '_15min.nc'\n wxt_obs = eu.netCDF_read(filepath, vars=['time', 'RH', 'Tair', 'press'])\n\n # extract out RH obs to match mod_time\n # pull out ALL the nearest time idxs and differences\n # the mod_data time is the same for all sites so can therefore use any site\n t_idx = np.array([eu.nearest(wxt_obs['time'], t)[1] for t in time])\n t_diff = np.array([eu.nearest(wxt_obs['time'], t)[2] for t in time])\n\n wxt_obs['RH'] = wxt_obs['RH'][t_idx] # [%]\n wxt_obs['Tair'] = wxt_obs['Tair'][t_idx] # [degC]\n wxt_obs['press'] = wxt_obs['press'][t_idx] # [hPa]\n wxt_obs['time'] = wxt_obs['time'][t_idx]\n # wxt_obs['rawtime'] = wxt_obs['rawtime'][t_idx]\n\n # overwrite t_idx locations where t_diff is too high with nans\n # only keep t_idx values where the difference is below 1 hour\n bad = np.array([abs(i.days * 86400 + i.seconds) > 60 * 60 for i in t_diff])\n\n wxt_obs['RH'][bad] = np.nan\n wxt_obs['Tair'][bad] = np.nan\n wxt_obs['press'][bad] = np.nan\n\n wxt_obs['time'][bad] = np.nan\n # wxt_obs['rawtime'][bad] = np.nan\n\n # create RH_frac using RH data\n wxt_obs['RH_frac'] = wxt_obs['RH'] / 100.0\n\n # calculate extra variables\n e_s_hpa = 6.112 * (np.exp((17.67 * wxt_obs['Tair']) / (wxt_obs['Tair'] + 243.5))) # [hPa] # sat. v. pressure\n e_s = e_s_hpa * 100.0 # [Pa] # sat. v. pressure\n wxt_obs['e'] = wxt_obs['RH_frac'] * e_s # [Pa] # v. pressure\n wxt_obs['r_v'] = wxt_obs['e'] / (1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) # water_vapour mixing ratio [kg kg-1]\n wxt_obs['q'] = wxt_obs['e'] / ((1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) + wxt_obs['e']) # specific humidity [kg kg-1]\n wxt_obs['Tv'] = (1 + (0.61 * wxt_obs['q'])) * (wxt_obs['Tair'] + 273.15) # virtual temp [K]\n wxt_obs['air_density'] = (wxt_obs['press']*100.0) / (286.9 * wxt_obs['Tv'])# [kg m-3]\n\n # extend the wxt obs in height to match the dimensions of model RH\n # copy the obs so it is the same at all heights\n for var, item in wxt_obs.iteritems():\n if var not in ['time', 'rawtime']:\n # wxt_obs[var] = np.transpose(np.tile(item, (int(rh_frac.shape[1]), 1)))\n wxt_obs[var] = np.transpose(np.tile(item, (int(z.shape[-1]), 1)))\n\n return wxt_obs",
"def read_ncep(ncdf_path,year):\r\n\r\n # path to the netcdf files\r\n ncdf_AT_file = os.path.join(ncdf_path,'.'.join(['air','{:0>4}'.format(year),'nc']))\r\n ncdf_GH_file = os.path.join(ncdf_path,'.'.join(['hgt','{:0>4}'.format(year),'nc']))\r\n ncdf_SH_file = os.path.join(ncdf_path,'.'.join(['shum','{:0>4}'.format(year),'nc']))\r\n\r\n print('Read global',year,'NCEP data ...')\r\n # Air Temperature\r\n DATA = read_data(netCDF4.Dataset(ncdf_AT_file,'r'), ['air'])\r\n if len(DATA['air']) < 17:\r\n print('Need 17 levels of AT data: found only ',len(lev_AT))\r\n\r\n # Specific Humidity\r\n SHUM_DATA = read_data(netCDF4.Dataset(ncdf_SH_file,'r'), ['shum'])\r\n if len(SHUM_DATA['level']) < 8:\r\n print('Need 8 levels of SH data: found only ',len(lev_SH))\r\n\r\n if list(SHUM_DATA['level'])!=list(DATA['level'][:len(SHUM_DATA['level'])]):\r\n print('Warning: air and shum do not share the same lower pressure levels')\r\n\r\n DATA.update(SHUM_DATA)\r\n\r\n # Geopotential Height\r\n GH_DATA = read_data(netCDF4.Dataset(ncdf_GH_file,'r'), ['hgt'])\r\n if len(GH_DATA['level']) < 17:\r\n print('Need 17 levels of GH data: found only ',len(lev_GH))\r\n\r\n DATA.update(GH_DATA)\r\n\r\n for key in DATA:\r\n if 'air' in key:\r\n DATA[key.replace('air','T')] = DATA[key]\r\n del DATA[key]\r\n if 'hgt' in key:\r\n DATA[key.replace('hgt','H')] = DATA[key]\r\n del DATA[key]\r\n if 'shum' in key:\r\n DATA[key.replace('shum','QV')] = DATA[key]\r\n del DATA[key]\r\n\r\n DATA['lev'] = DATA['level']\r\n del DATA['level']\r\n\r\n return DATA",
"def add_data2daily_netcdf_met(half_hourly_nc, daily_nc):\n hh_data = nC.Dataset(half_hourly_nc, 'r')\n d_data = nC.Dataset(daily_nc, 'a')\n # half hourly data\n hh_times = hh_data.variables['time']\n hh_air_temps = hh_data.variables['air_temp']\n hh_soil_temps = hh_data.variables['soil_temp']\n hh_rg = hh_data.variables['rg']\n is_day = hh_data.variables['is_day']\n # daily data\n daily_times = d_data.variables['time']\n rg_day = d_data.variables['rg']\n daily_mean_temp = d_data.variables['daily_mean_temp']\n daily_max_temp = d_data.variables['daily_max_temp']\n daily_min_temp = d_data.variables['daily_min_temp']\n mean_temp_day = d_data.variables['mean_temp_day']\n mean_temp_night = d_data.variables['mean_temp_night']\n daily_mean_soil_temp = d_data.variables['daily_mean_soil_temp']\n mean_soil_temp_day = d_data.variables['mean_soil_temp_day']\n mean_soil_temp_night = d_data.variables['mean_soil_temp_night']\n doy = d_data.variables['doy']\n day_length = d_data.variables['day_length']\n night_length = d_data.variables['night_length']\n\n time_lst = nC.num2date(daily_times[:], daily_times.units)\n nc_doy(doy, daily_times)\n nc_day_len(is_day, day_length, hh_times, time_lst)\n nc_night_len(is_day, night_length, hh_times, time_lst)\n print 'times done'\n # update rg values\n daily_rg_values(hh_rg, rg_day)\n print 'rg done'\n # update daily air temps\n daily_temperatures(hh_air_temps, daily_mean_temp, daily_max_temp, daily_min_temp)\n nc_day_mean_temp(is_day, hh_air_temps, mean_temp_day, hh_times, time_lst)\n nc_night_mean_temp(is_day, hh_air_temps, mean_temp_night, hh_times, time_lst)\n print 'temps done'\n # update daily soil temps\n daily_soil_temperatures(hh_soil_temps, daily_mean_soil_temp)\n nc_day_mean_temp(is_day, hh_soil_temps, mean_soil_temp_day, hh_times, time_lst)\n nc_night_mean_temp(is_day, hh_soil_temps, mean_soil_temp_night, hh_times, time_lst)\n print 'soil temps done'\n hh_data.close()\n d_data.close()\n return 'yay'",
"def read_all_rh_obs(day, site_rh, rhDatadir, mod_data):\n\n # define array\n rh_obs = {}\n\n # get date string for obs of the main and following days\n doyStr = day.strftime('%Y%j')\n # doyStr2 = (day + dt.timedelta(hours=24)).strftime('%Y%j')\n\n for site, height in site_rh.iteritems():\n\n rh_obs[site] = {}\n\n # rh_fnames = [rhDatadir + site + '_' + doyStr + '_1min.nc',\n # rhDatadir + site + '_' + doyStr2 + '_1min.nc']\n\n rh_fnames = rhDatadir + site + '_' + doyStr + '_1min.nc'\n\n # read in all data\n data_obs = eu.netCDF_read(rh_fnames, vars=['RH', 'time'])\n data_obs['height'] = height\n\n # find nearest time in rh time\n # pull out ALL the nearest time idxs and differences\n t_idx = np.array([eu.nearest(data_obs['time'], t)[1] for t in mod_data[mod_data.keys()[0]]['time']])\n t_diff = np.array([eu.nearest(data_obs['time'], t)[2] for t in mod_data[mod_data.keys()[0]]['time']])\n\n # extract hours\n rh_obs[site]['RH'] = data_obs['RH'][t_idx]\n rh_obs[site]['height'] = data_obs['height']\n rh_obs[site]['time'] = [data_obs['time'][i] for i in t_idx]\n\n # overwrite t_idx locations where t_diff is too high with nans\n # only keep t_idx values where the difference is below 5 minutes\n bad = np.array([abs(i.days * 86400 + i.seconds) > 10 * 60 for i in t_diff])\n rh_obs[site]['RH'][bad] = np.nan\n\n # change flags to nans\n rh_obs[site]['RH'][np.where(rh_obs[site]['RH'] < 0)] = np.nan\n\n return rh_obs",
"def read_f_RH(ceil_lam):\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH",
"def cfht_weather_data(year, month, day, hour, minute,\n dir='/u/ghezgroup/code/python/keckdar/'):\n\n temperature = np.zeros(len(year), dtype=float)\n pressure = np.zeros(len(year), dtype=float)\n humidity = np.zeros(len(year), dtype=float)\n wind_speed = np.zeros(len(year), dtype=float)\n wind_dir = np.zeros(len(year), dtype=float)\n\n\n cfht_file = None\n\n for ii in range(len(year)):\n cfht_file_new = dir + 'cfht-wx.' + str(year[ii]) + '.' + \\\n str(month[ii]).zfill(2) + '.dat'\n\n if (cfht_file != cfht_file_new):\n cfht_file = cfht_file_new\n cfht = asciidata.open(cfht_file)\n\n atmYear = cfht[0].tonumpy()\n atmMonth = cfht[1].tonumpy()\n atmDay = cfht[2].tonumpy()\n atmHour = cfht[3].tonumpy()\n atmMin = cfht[4].tonumpy() # HST times\n atmWindSpeed = cfht[5].tonumpy() # km/h\n atmWindDir = cfht[6].tonumpy() # degrees\n atmTemp = cfht[7].tonumpy() # Celsius\n atmHumidity = cfht[8].tonumpy() # percent\n atmPressure = cfht[9].tonumpy() # mb pressure\n\n\n # Find the exact time match for year, month, day, hour\n idx = (np.where((atmDay == day[ii]) & (atmHour == hour[ii])))[0]\n \n if (len(idx) == 0):\n print 'Could not find DAR data for %4d-%2d-%2d %2d:%2d in %s' % \\\n (year, month, day, hour, minute, logFile)\n\n # Find the closest minute\n mdx = abs(atmMin[idx] - minute[ii]).argmin()\n match = idx[ mdx ]\n\n # Ambient Temperature (Celsius)\n temperature[ii] = atmTemp[match]\n\n # Pressure at the observer (millibar)\n # Should be around 760.0 millibars\n pressure[ii] = atmPressure[match]\n\n # Relative humidity (%)\n # Should be around 0.1 %\n humidity[ii] = atmHumidity[match]\n\n # Wind speed (km/h)\n wind_speed[ii] = atmWindSpeed[match]\n\n # Wind direction (degrees)\n wind_dir[ii] = atmWindDir[match]\n\n return temperature, pressure, humidity, wind_speed, wind_dir",
"def add_data2daily_netcdf_nee(half_hourly_nc, daily_nc):\n hh_data = nC.Dataset(half_hourly_nc, 'r')\n d_data = nC.Dataset(daily_nc, 'a')\n # half hourly data\n hh_times = hh_data.variables['time']\n is_day = hh_data.variables['is_day']\n co2_flux = hh_data.variables['processed_co2_flux']\n qc_co2_flux = hh_data.variables['qc_co2_flux']\n wind_dir = hh_data.variables['wind_dir']\n foot_print = hh_data.variables['foot_print']\n # daily data\n daily_times = d_data.variables['time']\n nee = d_data.variables['nee']\n nee_std = d_data.variables['nee_std']\n nee_day = d_data.variables['nee_day']\n nee_day_std = d_data.variables['nee_day_std']\n nee_night = d_data.variables['nee_night']\n nee_night_std = d_data.variables['nee_night_std']\n nee_origin = d_data.variables['nee_origin']\n nee_origin_day = d_data.variables['nee_day_origin']\n nee_origin_night = d_data.variables['nee_night_origin']\n\n time_lst = nC.num2date(daily_times[:], daily_times.units)\n # update daily NEE values and origins\n process_co2_flux_daily_d(co2_flux, qc_co2_flux, nee, nee_std, is_day, wind_dir, nee_origin, foot_print, hh_times,\n time_lst, 12, 25)\n #print 'daily nee done'\n # update daytime NEE values and origins\n #process_co2_flux_daytime_d(co2_flux, qc_co2_flux, nee_day, nee_day_std, is_day, wind_dir, nee_origin_day,\n # foot_print, hh_times, time_lst, 6, 12)\n #print 'daytime nee done'\n # update nighttime NEE values and origins\n #process_co2_flux_nighttime_d(co2_flux, qc_co2_flux, nee_night, nee_night_std, is_day, wind_dir, nee_origin_night,\n # foot_print, hh_times, time_lst, 1, 3)\n #print 'nighttime nee done'\n hh_data.close()\n d_data.close()\n return 'yay'",
"def load_heat_waves(filename):\n from netCDF4 import Dataset\n import numpy as np\n ncin = Dataset(filename, 'r')\n maskfile = ('/srv/ccrc/data35/z5032520/AWAP/mask/varmask.nc')\n mask1file = ('/srv/ccrc/data35/z5032520/AWAP/mask/AWAP_Land-Sea-Mask_0.5deg.nc')\n varmasknc = Dataset(maskfile,'r')\n varmask = varmasknc.variables['mask'][:]\n hwf = ncin.variables['HWF_EHF'][:]\n mask1nc = Dataset(mask1file,'r')\n mask = abs(mask1nc.variables['LSM'][:]-1)\n mask2 = (mask+varmask)>0\n mask1 = np.empty(hwf.shape)\n for n in range(hwf.shape[0]):\n mask1[n, :, :] = mask2\n hwf = np.ma.array(ncin.variables['HWF_EHF'][:], mask=mask1)\n hwn = np.ma.array(ncin.variables['HWN_EHF'][:], mask=mask1)\n hwf[hwn.data==0] = 0\n hwd = np.ma.array(ncin.variables['HWD_EHF'][:], mask=mask1)\n hwd[hwn.data==0] = 0\n hwa = np.ma.array(ncin.variables['HWA_EHF'][:], mask=mask1)\n hwa[hwn.data==0] = 0\n hwm = np.ma.array(ncin.variables['HWM_EHF'][:], mask=mask1)\n hwm[hwn.data==0] = 0\n hwt = np.ma.array(ncin.variables['HWT_EHF'][:], mask=mask1)\n hwt[hwn.data==0] = 0\n lat = ncin.variables['lat'][:]\n lon = ncin.variables['lon'][:]\n times = ncin.variables['time'][:]\n return hwf, hwn, hwd, hwa, hwm, hwt, lat, lon, times",
"def read_obs_hmv_declination(obscode, year_st, year_fn, folder):\n\n OBSY = obscode.upper()\n obsy = obscode.lower()\n # Read in the observatory data one year file at a time and construct filenames\n datareq = pd.DataFrame()\n for year in range(year_st, year_fn+1):\n ystr = str(year)\n file = obsy + ystr + 'dhor.hor'\n fpf = folder + '/' + file\n tmp = IAGA2002_Data_Reader(fpf)\n tmp.columns = [col.strip(OBSY) for col in tmp.columns]\n tmp = tmp.replace(99999.00, np.nan)\n # Calculate D (in degrees) if not given in the file\n if('D' not in tmp.columns):\n dvals, hvals, ivalsm, fvals = xyz2dhif(tmp['X'], tmp['Y'], tmp['Z'])\n tmp.insert(loc=1, column='D', value=dvals.values)\n else:\n # Convert the reported values to degrees\n tmp['D'] = tmp.D.values/60.0\n datareq = datareq.append(tmp[['D']])\n return(datareq)",
"def readNetCDF_Forecast(infile, outfile, monf, fyr, tgti, tgtf, tar, wlo1, elo1, sla1, nla1):\n ds=xr.open_dataset(infile,decode_times=False)\n da=list(ds.coords)\n\n for i in range(len(da)):\n if da[i]=='X' or da[i]=='lon' or da[i]=='longitude':\n ds = ds.rename({da[i]:'X'})\n if da[i]=='Y' or da[i]=='lat' or da[i]=='latitude':\n ds = ds.rename({da[i]:'Y'})\n if da[i]=='S':\n deltastyr=int(ds[da[i]][0]/12)\n nmon=ds.S.shape[0]\n nyr=int(nmon/12)\n if 'months since' in ds.S.units:\n line=ds.S.units\n stdate=str(int(line.split()[2][:4])+deltastyr)+line.split()[2][-6:]\n ds['S'] = pd.date_range(stdate, periods=ds.S.shape[0], freq='M')\n\n ds1=ds.sel(X=slice(wlo1,elo1),Y=slice(sla1,nla1),L=slice(float(tgti),float(tgtf))).mean(dim='L',skipna=True)\n ds2=ds1.mean(dim='M',skipna=True)\n Xarr=ds2.X.values\n Yarr=ds2.Y.values\n W=ds2.X.shape[0]\n H=ds2.Y.shape[0]\n a=list(ds)\n\n var1=ds2[a[0]]\n units=ds[a[0]].units\n Ti=fyr\n\n vari = a[0]\n varname = vari\n L=0.5*(float(tgtf)+float(tgti))\n\n monthdic = {'Jan':'01','Feb':'02','Mar':'03','Apr':'04','May':'05','Jun':'06','Jul':'07','Aug':'08','Sep':'09','Oct':'10','Nov':'11','Dec':'12'}\n S1=monthdic[monf]\n mi=monthdic[tar.split(\"-\")[0]]\n mf=monthdic[tar.split(\"-\")[1]]\n\n var1_stmon=var1[(var1.S.dt.month==int(monthdic[monf]))]\n var=var1_stmon.groupby(var1_stmon.S.dt.year).mean(dim=('S')).sel(year=fyr)\n var_N2S=var.reindex(Y=var.Y[::-1])\n Yarr=var_N2S.Y.values\n if tar=='Dec-Feb' or tar=='Nov-Jan': #double check years are sync\n xyear=True #flag a cross-year season\n else:\n xyear=False\n T=1\n Tarr = np.arange(Ti, Ti+T)\n\n if 'True' in np.isnan(var):\n var[np.isnan(var)]=-999. #use CPT missing value\n #Now write the CPT file\n outfile=\"usr_fcst_\"+a[0]+\"_\"+tar+\"_ini\"+monf+str(fyr)+\".tsv\"\n f = open(outfile, 'w')\n f.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n f.write(\"cpt:nfields=1\\n\")\n\n for it in range(T):\n if xyear==True:\n f.write(\"cpt:field=\"+vari+\", cpt:L=\"+str(L)+\" months, cpt:S=\"+str(Tarr[it])+\"-\"+S1+\"-01T00:00, cpt:T=\"+str(Tarr[it])+\"-\"+mi+\"/\"+str(Tarr[it]+1)+\"-\"+mf+\", cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n else:\n f.write(\"cpt:field=\"+vari+\", cpt:L=\"+str(L)+\" months, cpt:S=\"+str(Tarr[it])+\"-\"+S1+\"-01T00:00, cpt:T=\"+str(Tarr[it])+\"-\"+mi+\"/\"+mf+\", cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n np.savetxt(f, Xarr, fmt=\"%.6f\",newline='\\t')\n f.write(\"\\n\") #next line\n for iy in range(H):\n np.savetxt(f,np.r_[Yarr[iy],var_N2S[iy,0:]],fmt=\"%.6f\", newline='\\t') #excise extra line\n f.write(\"\\n\") #next line\n f.close()",
"def apr3read(filename):\n \n \n apr = {}\n flag = 0\n\n ##Radar varibles in hdf file found by hdf.datasets\n radar_freq = 'zhh14' #Ku\n radar_freq2 = 'zhh35' #Ka\n radar_freq3 = 'z95s' #W\n radar_freq4 = 'ldr14' #LDR\n vel_str = 'vel14' #Doppler\n ##\n\n\n\n hdf = h5py.File(filename,\"r\")\n\n listofkeys = hdf['lores'].keys()\n alt = hdf['lores']['alt3D'][:]\n lat = hdf['lores']['lat'][:]\n lon = hdf['lores']['lon'][:]\n time = hdf['lores']['scantime'][:]\n surf = hdf['lores']['surface_index'][:]\n isurf = hdf['lores']['isurf'][:]\n plane = hdf['lores']['alt_nav'][:]\n radar = hdf['lores'][radar_freq][:]\n radar2 = hdf['lores'][radar_freq2][:]\n radar4 = hdf['lores'][radar_freq4][:]\n vel = hdf['lores']['vel14c'][:]\n lon3d = hdf['lores']['lon3D'][:]\n lat3d = hdf['lores']['lat3D'][:]\n alt3d = hdf['lores']['alt3D'][:]\n\n #see if there is W band\n if 'z95s' in listofkeys:\n if 'z95n' in listofkeys:\n radar_nadir = hdf['lores']['z95n']\n radar_scanning = hdf['lores']['z95s']\n radar3 = radar_scanning\n ##uncomment if you want high sensativty as nadir scan (WARNING, CALIBRATION)\n #radar3[:,12,:] = radar_nadir[:,12,:]\n else:\n radar3 = hdf['lores']['z95s']\n print('No vv, using hh')\n else:\n radar3 = np.ma.array([])\n flag = 1\n print('No W band')\n\n ##convert time to datetimes\n time_dates = np.empty(time.shape,dtype=object)\n for i in np.arange(0,time.shape[0]):\n for j in np.arange(0,time.shape[1]):\n tmp = datetime.datetime.utcfromtimestamp(time[i,j])\n time_dates[i,j] = tmp\n\n #Create a time at each gate (assuming it is the same down each ray, there is a better way to do this) \n time_gate = np.empty(lat3d.shape,dtype=object)\n for k in np.arange(0,550):\n for i in np.arange(0,time_dates.shape[0]):\n for j in np.arange(0,time_dates.shape[1]):\n time_gate[k,i,j] = time_dates[i,j] \n\n #Quality control (masked where invalid)\n radar = np.ma.masked_where(radar <= -99,radar)\n radar2 = np.ma.masked_where(radar2 <= -99,radar2)\n radar3 = np.ma.masked_where(radar3 <= -99,radar3)\n radar4 = np.ma.masked_where(radar4 <= -99,radar4)\n \n #Get rid of nans, the new HDF has builtin\n radar = np.ma.masked_where(np.isnan(radar),radar)\n radar2 = np.ma.masked_where(np.isnan(radar2),radar2)\n radar3 = np.ma.masked_where(np.isnan(radar3),radar3)\n radar4 = np.ma.masked_where(np.isnan(radar4),radar4)\n\n\n apr['Ku'] = radar\n apr['Ka'] = radar2\n apr['W'] = radar3\n apr['DFR_1'] = radar - radar2 #Ku - Ka\n\n if flag == 0:\n apr['DFR_3'] = radar2 - radar3 #Ka - W\n apr['DFR_2'] = radar - radar3 #Ku - W\n apr['info'] = 'The shape of these arrays are: Radar[Vertical gates,Time/DistanceForward]'\n else:\n apr['DFR_3'] = np.array([]) #Ka - W\n apr['DFR_2'] = np.array([]) #Ku - W\n apr['info'] = 'The shape of these arrays are: Radar[Vertical gates,Time/DistanceForward], Note No W band avail'\n\n apr['ldr'] = radar4\n apr['vel'] = vel\n apr['lon'] = lon\n apr['lat'] = lat\n apr['alt_gate'] = alt3d\n apr['alt_plane'] = plane\n apr['surface'] = isurf \n apr['time']= time\n apr['timedates']= time_dates\n apr['time_gate'] = time_gate\n apr['lon_gate'] = lon3d\n apr['lat_gate'] = lat3d\n\n # fileheader = hdf.select('fileheader')\n roll = hdf['lores']['roll']\n pitch = hdf['lores']['pitch']\n drift = hdf['lores']['drift']\n\n ngates = alt.shape[0] \n\n apr['ngates'] = ngates\n apr['roll'] = roll\n apr['pitch'] = pitch\n apr['drift'] = drift\n\n _range = np.arange(15,550*30,30)\n _range = np.asarray(_range,float)\n ind = np.where(_range >= plane.mean())\n _range[ind] = np.nan\n apr['range'] = _range\n\n return 
apr",
"def read_calfits( f ):\n hdu = pf.open( f )\n wlmin = hdu[0].header['crval1']\n res = hdu[0].header['cdelt1']\n length = hdu[0].data.shape[-1]\n wl = np.arange( wlmin, wlmin+res*length, res )\n fl = hdu[0].data[0]\n er = hdu[0].data[1]\n return wl,fl,er",
"def readMETEO(filename, headonly=False, **kwargs):\n\n starttime = kwargs.get('starttime')\n endtime = kwargs.get('endtime')\n takehelium = kwargs.get('takehelium')\n debug = kwargs.get('debug')\n getfile = True\n\n heliumcols = []\n\n stream = DataStream()\n\n if debug:\n print (\"METEO: found RCS meteo data\")\n\n # Check whether header infromation is already present\n headers = {}\n\n theday = extractDateFromString(filename)\n\n try:\n if starttime:\n if not theday[-1] >= datetime.date(stream._testtime(starttime)):\n getfile = False\n if endtime:\n if not theday[0] <= datetime.date(stream._testtime(endtime)):\n getfile = False\n except:\n print(\"Did not recognize the date format\")\n # Date format not recognized. Need to read all files\n getfile = True\n\n fh = open(filename, 'rb')\n\n array = [[] for key in KEYLIST]\n fkeys = []\n felements = []\n\n if getfile:\n for line in fh:\n line = line.decode('utf-8',errors='ignore')\n if line.isspace():\n # blank line\n continue\n elif line.startswith(' '):\n continue\n elif line.startswith('Date'):\n # Read the header information\n #1) first get number of columns\n cols = line.split()\n if not takehelium:\n try:\n columns = [elem for elem in cols if not elem.startswith('He')]\n except:\n print(\"Found error in header\", filename)\n columns = []\n else:\n columns = cols\n for i, elem in enumerate(columns):\n if i > 1:\n key = KEYLIST[i-1]\n fkeys.append(key)\n headers['col-'+key] = elem.replace('_','')\n headers['unit-col-'+key] = '-'\n\n else:\n colsstr = line.split()\n if not takehelium:\n try:\n colsstr = [elem for i, elem in enumerate(colsstr) if not cols[i].startswith('He')]\n except:\n print(\"Found error in data sequence\", filename)\n #print colsstr\n break\n row = LineStruct()\n try:\n date = colsstr[0]+'-'+colsstr[1]\n array[0].append(date2num(datetime.strptime(date,\"%Y%m%d-%H%M%S\")))\n #row.time = date2num(datetime.strptime(date,\"%Y%m%d-%H%M%S\"))\n for i in range(2,len(colsstr)):\n key = KEYLIST[i-1]\n if not key.startswith('str') and not key in ['flag','comment','typ']:\n array[i-1].append(float(colsstr[i]))\n #exec('row.'+key+' = float(colsstr[i])')\n elif not key in ['flag','comment','typ']:\n array[i-1].append(str(float(colsstr[i])))\n #exec('row.'+key+' = str(float(colsstr[i]))')\n #row.typ = 'other'\n #stream.add(row)\n except:\n pass\n\n for idx,el in enumerate(array):\n array[idx] = np.asarray(el)\n\n headers['SensorDescription'] = 'RCS: filtered Meteorlogical data - Andreas Winkelbauer'\n headers['SensorName'] = 'Various Meteorology sensors'\n headers['SensorID'] = 'METEO_RCS2015_0001'\n headers['SensorType'] = 'Various'\n headers['SensorModule'] = 'RCS'\n headers['SensorDataLogger'] = 'F77'\n headers['SensorGroup'] = 'environment'\n headers['DataFormat'] = 'RCSMETEO v3.0'\n headers['col-t2'] = '430UEV' # Necessary because of none UTF8 coding in header\n headers['col-f'] = 'T'\n headers['unit-col-f'] = 'deg C'\n headers['col-z'] = 'Schneehoehe'\n headers['unit-col-z'] = 'cm'\n if not takehelium:\n headers['col-t1'] = 'rh'\n headers['unit-col-t1'] = 'percent'\n headers['col-var5'] = 'P'\n headers['unit-col-var5'] = 'hPa'\n headers['col-var1'] = 'Wind'\n headers['unit-col-var1'] = 'm/s'\n\n headers['SensorKeys'] = ','.join(fkeys)\n headers['SensorElements'] = ','.join([headers['col-'+key] for key in KEYLIST if key in fkeys])\n\n if debug:\n print (\"METEO: Successfully loaded METEO data\")\n return DataStream([LineStruct()], headers, np.asarray(array,dtype=object))",
"def read_focal_temp(tyear, yday, tstart, tstop):\n#\n#--- if y daay is less than 8, read the data from the last year\n#\n if yday < 8:\n ifile = '/data/mta/Script/ACIS/Focal/Data/focal_plane_data_5min_avg_' + str(tyear-1)\n data = read_data_file(ifile, sep='\\s+', c_len=2)\n ftime = data[0]\n focal = data[1]\n else:\n ftime = []\n focal = []\n#\n#--- otherwise, just read this year\n#\n ifile = '/data/mta/Script/ACIS/Focal/Data/focal_plane_data_5min_avg_' + str(tyear)\n data = read_data_file(ifile, sep='\\s+', c_len=2)\n ftime = ftime + data[0]\n focal = focal + data[1]\n#\n#--- select out the data for the last 7 days\n#\n [ftime, focal] = select_data_by_date(ftime, focal, tstart, tstop)\n\n return [ftime, focal]",
"def load_data_from_nc():\n \n file_data = Dataset(\"air.mon.mean.nc\", \"r\")\n latitudes = file_data.variables[\"lat\"][:] \n longitudes = file_data.variables[\"lon\"][:]\n times = file_data.variables[\"time\"][:] \n air_temperatures = file_data.variables[\"air\"][:] \n file_data.close()\n \n return latitudes, longitudes, times, air_temperatures",
"def read_and_process_cabauw_data(years = [], months = []):\n if len(months) == 0: months = [s.month]\n else: months = [j if isinstance(j, str) else format(j, '02d') for j in months]\n if len(years) == 0: years = [s.year for j in months]\n else: years = [str(j) for j in years]\n\n data = Cabauw_Data()\n n_days = 0\n for i in range(len(months)):\n print('read_cabauw_data', years[i], months[i])\n f = xr.open_dataset(s.data_path+'cesar_tower_meteo_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n time = np.array(f.variables['time']) #Is given in hours, with data every 10 minutes\n z = np.array(f.variables['z'])\n speed = np.array(f.variables['F'])\n direction = np.array(f.variables['D'])\n T = np.array(f.variables['TA'])\n Td = np.array(f.variables['TD'])\n \n n_time = len(time); n_z = len(z)\n \n #Reshape the data by adding an extra axis that represents different days\n n_times_day = 6*24 #Data every 10 minutes\n n_days_month = int(n_time / n_times_day)\n n_days += n_days_month\n \n hours = np.reshape(np.mod(time, 24), (n_days_month, n_times_day))\n speed = np.reshape(speed, (n_days_month, n_times_day, n_z))\n direction = np.reshape(direction, speed.shape)\n #- signs, because direction gives the direction from which the wind is blowing, and not to which the wind is blowing.\n u = - speed * np.sin(direction * np.pi/180.)\n v = - speed * np.cos(direction * np.pi/180.)\n V = np.zeros(u.shape + (2,))\n V[:,:,:,0] = u; V[:,:,:,1] = v\n T = np.reshape(T, speed.shape)\n theta = T + g/Cp*z[np.newaxis, np.newaxis, :]\n Td = np.reshape(Td, speed.shape)\n \n \n #Import the second file that contains a.o. surface pressures\n f2 = xr.open_dataset(s.data_path+'cesar_surface_meteo_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n p0 = np.reshape(np.array(f2.variables['P0']), speed.shape[:2])\n \n #Import the third file that contains radiation data\n f3 = xr.open_dataset(s.data_path+'cesar_surface_radiation_lc1_t10_v1.0_'+years[i]+months[i]+'.nc', decode_times = False)\n longwave_upward = np.reshape(np.array(f3.variables['LWU']), speed.shape[:2]) \n longwave_downward = np.reshape(np.array(f3.variables['LWD']), speed.shape[:2]) \n \n variables = ['hours','speed','direction','u','v','V','T','theta','Td','z','p0','longwave_upward','longwave_downward']\n for j in variables:\n if i == 0:\n exec('data.'+j+' = '+j)\n elif j != 'z':\n exec('data.'+j+' = np.concatenate([data.'+j+','+j+'], axis = 0)')\n \n return data #Return data object with the data as attributes",
"def read_mxl_forcing(ffile, start_time=None, end_time=None, dt0=1800.0, dt=1800.0, na_values='NaN'):\n \n dat = pd.read_csv(ffile, sep=';', header='infer')\n tvec = pd.to_datetime(dat[['year', 'month', 'day', 'hour', 'minute']])\n dat.index = tvec\n \n dat['doy'] = dat['doy'].astype(float)\n dat['Prec'] = dat['Prec'] / dt0 # mm s-1\n dat['P'] *= 1e3 # Pa\n dat['H2O'] *= 1e-3 # mol/mol\n \n # --- get period \n if start_time == None:\n start_time = dat.index[0]\n if end_time == None:\n end_time = dat.index[-1]\n \n dat = dat[start_time:end_time]\n \n # -- convert surface fluxes to kinematic fluxes\n T = dat.Ta.values # degC\n P = dat.P.values # Pa\n h2o = dat.H2O.values * P # Pa\n Pdry = P - h2o # Pa pressure of dry air\n\n rhoa = (Pdry*MAIR_DRY + h2o*MH2O) / (R*(T+NT)) # kg m-3\n Lv = 1.0e6*(2.501 - 2.361e-3*T) # J kg-1 \n Mair = P / (R *(T+NT)) # m3 mol-1\n print(np.mean(Mair), np.mean(rhoa))\n \n dat['wt'] = dat.H.values / (rhoa * CP_AIR_MASS) # K m s-1\n dat['wq'] = dat.LE / (rhoa * Lv) # kg/kg m s-1\n dat['wc'] = dat.NEE / Mair # umol/mol m s-1 = ppm m s-1\n \n #dat = dat[['doy','Ta', 'P', 'ust', 'wt', 'wq', 'wc']]\n \n # interpolate to dt\n if dt != dt0:\n step = '%dS' %dt\n sampler = dat.resample(step)\n dat = sampler.interpolate(method='nearest')\n\n return dat",
"def readData(period):\n if period == 'future':\n directory = '/volumes/eas-shared/ault/ecrl/spring-indices/LENS_springonset/data/cesm1.lens.1920-2005.cvdp_data/'\n NAO = []\n PDO = []\n NINO = []\n ens = list(xrange(2,31))\n for i in xrange(len(ens)):\n files = 'CESM1-CAM5-BGC-LE_%s.cvdp_data.2013-2100.nc' % ens[i]\n filename = directory + files\n values = Dataset(filename)\n time = values.variables['time'][:]\n pdo = values.variables['pdo_timeseries_mon'][:]\n nao = values.variables['nao_pc_mon'][:]\n nino = values.variables['nino34'][:]\n values.close()\n \n NAO.append(nao)\n PDO.append(pdo)\n NINO.append(nino)\n time = np.asarray(time)\n PDO = np.asarray(PDO)\n NINO = np.asarray(NINO)\n NAO = np.asarray(NAO)\n PDOyr = np.reshape(PDO,(PDO.shape[0],PDO.shape[1]/12.,12.))\n PDOave = np.nanmean(PDOyr,axis=2)\n NAOyr = np.reshape(NAO,(NAO.shape[0],NAO.shape[1]/12.,12.))\n NAOave = np.nanmean(NAOyr,axis=2)\n NINOyr = np.reshape(NINO,(NINO.shape[0],NINO.shape[1]/12.,12.))\n NINOave = np.nanmean(NINOyr,axis=2)\n \n leafmean, latmean, lstfrz, lat, lon = SIx() \n leafmean = leafmean[:,7:,:,:]\n latmean = latmean[:,7:,:,:]\n PDOave = PDOave[:,:-20]\n NAOave = NAOave[:,:-20]\n NINOave = NINOave[:,:-20]\n return PDOyr,PDOave,NAOyr,NAOave,NINOyr,NINOave,leafmean,latmean,lat,lon\n elif period == 'historical':\n directory = '/volumes/eas-shared/ault/ecrl/spring-indices/LENS_springonset/data/cesm1.lens.1920-2005.cvdp_data/'\n NAO = []\n PDO = []\n NINO = []\n ens = list(xrange(2,31))\n for i in xrange(len(ens)):\n files = 'CESM1-CAM5-BGC-LE_%s.cvdp_data.1920-2005.nc' % ens[i]\n filename = directory + files\n values = Dataset(filename)\n time = values.variables['time'][:]\n pdo = values.variables['pdo_timeseries_mon'][:]\n nao = values.variables['nao_pc_mon'][:]\n nino = values.variables['nino34'][:]\n values.close()\n \n NAO.append(nao)\n PDO.append(pdo)\n NINO.append(nino)\n time = np.asarray(time)\n PDO = np.asarray(PDO)\n NINO = np.asarray(NINO)\n NAO = np.asarray(NAO)\n PDOyr = np.reshape(PDO,(PDO.shape[0],PDO.shape[1]/12.,12.))\n PDOave = np.nanmean(PDOyr,axis=2)\n NAOyr = np.reshape(NAO,(NAO.shape[0],NAO.shape[1]/12.,12.))\n NAOave = np.nanmean(NAOyr,axis=2)\n NINOyr = np.reshape(NINO,(NINO.shape[0],NINO.shape[1]/12.,12.))\n NINOave = np.nanmean(NINOyr,axis=2)\n \n leafmean, latmean, lat, lon = SIxHistorical()\n return PDOyr,PDOave,NAOyr,NAOave,NINOyr,NINOave,leafmean,latmean,lat,lon"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate Q_ext_wet using Q_ext_dry and f(RH) for the current wavelength. Q_ext,dry and f_RH vary monthly, based on observations at NK and CH for the urban and rural site default settings respectively. f_RH also varies with geometric radius. EW 23/02/17
|
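For orientation, the relation implemented by the function below can be written out using the code's own variable names (the month index is taken from mod_time at each timestep):

Q_ext_wet(t, z, y, x) = Q_ext_dry(r_d(t, z, y, x), month(t)) * f_RH(month(t), r_g(t, z, y, x), rh_frac(t, z, y, x))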
def calc_Q_ext_wet(ceil_lam, r_d, r_g, rh_frac, mod_time):
import sys
if sys.platform == 'linux2':
sys.path.append('/net/home/mm0100/ewarren/Documents/AerosolBackMod/scripts/ellUtils') # general utils
from ellUtils import nearest, netCDF_read, binary_search_nearest
else:
from ellUtils.ellUtils import nearest, netCDF_read, binary_search_nearest
# Reading functions
def read_f_RH(mod_time, ceil_lam):
"""
Read in the f_RH data from netCDF file
EW 21/02/17
        :param mod_time: (array of datetimes) datetimes for the timesteps
:param ceil_lam: (int) ceilometer wavelength [nm]
:return: data = {RH:... f_RH:...}
"""
# file name and path
if sys.platform == 'linux2':
miedir = '/data/jcmm1/ewarren/Mie/'
else:
miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'
filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'
# read data
# f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])
f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])
return f_RH
def read_Q_ext_dry(mod_time, ceil_lam):
"""
Read in the Q_ext for dry murk.
EW 21/02/17
        :param mod_time: (array of datetimes) datetimes for the timesteps
:param ceil_lam: (int) ceilometer wavelength [nm]
:return: Q_ext_dry = {radius:... Q_ext_dry:...}
"""
if sys.platform == 'linux2':
miedir = '/data/jcmm1/ewarren/Mie/'
else:
miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'
filename = 'urban_monthly_Q_ext_dry_' + str(ceil_lam) + 'nm.csv'
raw = np.loadtxt(miedir + filename, delimiter=',')
# format data into a dictionary
Q_ext_dry = {'radius_m': raw[:, 0],
'Q_ext_dry': raw[:, 1:]} # Q_ext_dry['Q_ext_dry'].shape(radii, month)
return Q_ext_dry
# ---------------------------
    # convert geometric radius to nm to find f(RH)
r_g_nm = r_g * 1.0e9
# height idx range of r_d and RH
height_idx_range = r_d.shape[1]
# read in Q_ext_dry and f(RH) look up tables
    f_RH = read_f_RH(mod_time, ceil_lam) # ['f(RH) MURK'].shape = (month, radii, RH)
Q_ext_dry = read_Q_ext_dry(mod_time, ceil_lam) #.shape(radii, month)
    # create matrix of Q_ext_dry based on r_d
Q_ext_dry_matrix = np.empty(r_d.shape)
Q_ext_dry_matrix[:] = np.nan
f_RH_matrix = np.empty(r_d.shape)
f_RH_matrix[:] = np.nan
    # find Q_ext_dry, given the dry radius matrix
    # find f(RH), given the RH fraction matrix
# loop through all elements of the array
# idx is the full position of the element e.g. idx = (24L, 69L, 11L, 21L) - (time, height, lat, lon)
for idx, _ in np.ndenumerate(r_d):
# month idx for Q_ext_dry
month_idx = mod_time[idx[0]].month - 1
# debugging
# if (idx[1] == 0) & (idx[2] == 0) & (idx[3] == 0):
# print idx
# Q_ext_dry - binary
# LUT uses r_d (volume) [meters]
r_Q_idx = binary_search_nearest(Q_ext_dry['radius_m'], r_d[idx])
Q_ext_dry_matrix[idx] = Q_ext_dry['Q_ext_dry'][r_Q_idx, month_idx]
        # f(RH) (has its own r_idx that is in units [nm])
# LUT uses r_g (geometric) [nm] ToDo should change this to meters...
r_f_RH_idx = binary_search_nearest(f_RH['radii_range'], r_g_nm[idx])
rh_idx = binary_search_nearest(f_RH['RH'], rh_frac[idx])
f_RH_matrix[idx] = f_RH['f(RH) MURK'][month_idx, r_f_RH_idx, rh_idx]
# calculate Q_ext_wet
Q_ext = Q_ext_dry_matrix * f_RH_matrix
return Q_ext, Q_ext_dry_matrix, f_RH_matrix
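As a usage sketch only (not the author's driver code): the snippet below builds small synthetic look-up tables and model fields with the shapes calc_Q_ext_wet expects and repeats its core lookup-and-multiply step, with numpy.searchsorted standing in for the binary_search_nearest helper and random values standing in for the real Mie LUT files, which are not assumed here.

# Sketch: synthetic LUTs + model fields, repeating the lookup-and-multiply step above.
import datetime as dt
import numpy as np

# synthetic dry-extinction LUT, shape (radii, month)
radius_m = np.linspace(1.0e-8, 1.0e-6, 50)                  # dry (volume) radius axis [m]
Q_ext_dry_lut = np.random.uniform(1.0, 4.0, (50, 12))

# synthetic f(RH) LUT, shape (month, radii, RH)
radii_nm = np.linspace(10.0, 1000.0, 40)                    # geometric radius axis [nm]
rh_axis = np.linspace(0.0, 1.0, 101)                        # RH as a fraction
f_rh_lut = 1.0 + 2.0 * rh_axis[None, None, :] * np.ones((12, 40, 101))

# synthetic model fields on a tiny (time, height, lat, lon) grid
shape = (4, 5, 3, 3)
mod_time = [dt.datetime(2016, 1, 1) + dt.timedelta(hours=i) for i in range(shape[0])]
r_d = np.random.uniform(2.0e-8, 5.0e-7, shape)              # dry radius [m]
r_g = np.random.uniform(2.0e-8, 5.0e-7, shape)              # geometric radius [m]
rh_frac = np.random.uniform(0.3, 0.95, shape)

Q_ext_dry_matrix = np.empty(shape)
f_RH_matrix = np.empty(shape)
for idx, _ in np.ndenumerate(r_d):
    month_idx = mod_time[idx[0]].month - 1
    r_q_idx = min(np.searchsorted(radius_m, r_d[idx]), radius_m.size - 1)
    r_f_idx = min(np.searchsorted(radii_nm, r_g[idx] * 1.0e9), radii_nm.size - 1)
    rh_idx = min(np.searchsorted(rh_axis, rh_frac[idx]), rh_axis.size - 1)
    Q_ext_dry_matrix[idx] = Q_ext_dry_lut[r_q_idx, month_idx]
    f_RH_matrix[idx] = f_rh_lut[month_idx, r_f_idx, rh_idx]

Q_ext_wet = Q_ext_dry_matrix * f_RH_matrix                  # same product as in calc_Q_ext_wet
print(Q_ext_wet.shape, Q_ext_wet.mean())

The per-element loop is kept for clarity; np.searchsorted also accepts whole arrays, so the index lookups could be vectorised over the flattened fields in a single call.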
|
[
"def calc_Q_ext_wet(ceil_lam, r_md, RH):\n\n from ellUtils import nearest\n\n def read_f_RH(ceil_lam):\n \"\"\"\n Read in the f_RH data from csv\n EW 21/02/17\n\n :param filename:\n :return: data = {RH:... f_RH:...}\n\n filename must be in the form of 'calculated_ext_f(RH)_[ceil_lambda]nm.csv'\n \"\"\"\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH\n\n def read_Q_dry_ext(ceil_lam):\n \"\"\"\n Read in the Q_ext for dry murk.\n EW 21/02/17\n\n :param filename:\n :param lam:\n :return: Q_ext_dry = {radius:... Q_ext_dry:...}\n\n Requres the wavelength to be passed, just so in the future, the 910 nm file is not incorrectly used by mistake when\n it should use the file for another wavelength.\n \"\"\"\n\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n filename = 'calculated_Q_ext_' + str(ceil_lam) + 'nm.csv'\n\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n Q_ext_dry = {'radius': raw[:, 0],\n 'Q_ext': raw[:, 1]}\n\n return Q_ext_dry\n\n RH_factor = 0.01 # Relative Humidity in 0.38 not 38%\n\n # calculate Q_ext_wet\n f_RH = read_f_RH(ceil_lam)\n Q_ext_dry = read_Q_dry_ext(ceil_lam)\n\n # create matric of Q_ext_dry based on r_md\n Q_ext_dry_matrix = np.empty(r_md.shape)\n f_RH_matrix = np.empty(RH.shape)\n\n # find Q_ext dry, given the dry radius matrix\n if r_md.size != 1:\n for i in range(r_md.shape[0]):\n idx = nearest(Q_ext_dry['radius'], r_md[i])[1]\n Q_ext_dry_matrix[i] = Q_ext_dry['Q_ext'][idx]\n\n else:\n idx = nearest(Q_ext_dry['radius'], r_md)[1]\n Q_ext_dry_matrix = Q_ext_dry['Q_ext'][idx]\n\n # find f(RH), given the RH matrix\n # need RH factor as f_RH['RH'] in units of frac not percentage\n if RH.size != 1:\n for i in range(RH.shape[0]):\n idx = nearest(f_RH['RH'], RH_factor * RH[i])[1]\n f_RH_matrix[i] = f_RH['f_RH'][idx]\n else:\n idx = nearest(f_RH['RH'], RH_factor * RH)[1]\n f_RH_matrix = f_RH['f_RH'][idx]\n\n # calculate Q_ext_wet\n Q = Q_ext_dry_matrix * f_RH_matrix\n # print np.mean(Q_ext_dry_matrix[:,:20])\n\n return Q, Q_ext_dry_matrix, f_RH_matrix",
"def func_mmq_CR72(self, params):\n\n # Scaling.\n if self.scaling_flag:\n params = dot(params, self.scaling_matrix)\n\n # Unpack the parameter values.\n R20 = params[:self.end_index[0]]\n dw = params[self.end_index[0]:self.end_index[1]]\n dwH = params[self.end_index[1]:self.end_index[2]]\n pA = params[self.end_index[2]]\n kex = params[self.end_index[2]+1]\n\n # Convert dw and dwH from ppm to rad/s. Use the out argument, to pass directly to structure.\n multiply( multiply.outer( dw.reshape(1, self.NS), self.nm_no_nd_ones ), self.frqs, out=self.dw_struct )\n multiply( multiply.outer( dwH.reshape(1, self.NS), self.nm_no_nd_ones ), self.frqs_H, out=self.dwH_struct )\n\n # Reshape R20 to per experiment, spin and frequency.\n self.r20_struct[:] = multiply.outer( R20.reshape(self.NE, self.NS, self.NM), self.no_nd_ones )\n\n # Loop over the experiment types.\n for ei in range(self.NE):\n r20 = self.r20_struct[ei]\n dw_frq = self.dw_struct[ei]\n dwH_frq = self.dwH_struct[ei]\n\n # Alias the dw frequency combinations.\n aliased_dwH = 0.0\n if self.exp_types[ei] == EXP_TYPE_CPMG_SQ:\n aliased_dw = dw_frq\n elif self.exp_types[ei] == EXP_TYPE_CPMG_PROTON_SQ:\n aliased_dw = dwH_frq\n elif self.exp_types[ei] == EXP_TYPE_CPMG_DQ:\n aliased_dw = dw_frq + dwH_frq\n elif self.exp_types[ei] == EXP_TYPE_CPMG_ZQ:\n aliased_dw = dw_frq - dwH_frq\n elif self.exp_types[ei] == EXP_TYPE_CPMG_MQ:\n aliased_dw = dw_frq\n aliased_dwH = dwH_frq\n elif self.exp_types[ei] == EXP_TYPE_CPMG_PROTON_MQ:\n aliased_dw = dwH_frq\n aliased_dwH = dw_frq\n\n # Back calculate the R2eff values.\n r2eff_mmq_cr72(r20=r20, pA=pA, dw=aliased_dw, dwH=aliased_dwH, kex=kex, cpmg_frqs=self.cpmg_frqs[ei], inv_tcpmg=self.inv_relax_times[ei], tcp=self.tau_cpmg[ei], back_calc=self.back_calc[ei])\n\n # Clean the data for all values, which is left over at the end of arrays.\n self.back_calc = self.back_calc*self.disp_struct\n\n # For all missing data points, set the back-calculated value to the measured values so that it has no effect on the chi-squared value.\n if self.has_missing:\n # Replace with values.\n self.back_calc[self.mask_replace_blank.mask] = self.values[self.mask_replace_blank.mask]\n\n # Calculate the chi-squared statistic.\n return chi2_rankN(self.values, self.back_calc, self.errors)",
"def q_to_iwc(q, model, region):\n if model.lower() == \"fv3\":\n t = get_temp(model, region)\n qv = get_qv(model, region)\n p = get_pres(model, region)\n rho = p / \\\n (287*(1 + 0.61*(qv))*(np.nanmean(t, axis=(2))[:,:,np.newaxis,np.newaxis]))\n iwc = q.values * rho\n print(\"Warning: FV3 uses the spatially averaged density b/c \\\n specific humidity and temperature are on different grids\")\n elif model.lower() ==\"sam\":\n t = get_temp(model, region).values\n qv = get_qv(model, region).values\n p = get_pres(model, region).values\n rho = p[:,:,np.newaxis,np.newaxis] / \\\n (287*(1 + 0.61*qv)*t)\n iwc = q.values * rho\n else:\n if model.lower() == \"icon\":\n t = get_temp(model, region).values.astype('float32')\n qv = get_qv(model, region).values.astype('float16')\n Tv = (1 + 0.61*qv)*t\n print(\"... Tv ...\")\n del qv, t\n p = get_pres(model, region).values.astype('float32')\n else:\n t = get_temp(model, region).values\n qv = get_qv(model, region).values\n p = get_pres(model, region).values\n Tv = (1 + 0.61*qv)*t\n print(\"... Tv ...\")\n del qv, t\n rho = p / (287*Tv) \n print(\"... rho ...\")\n del p, Tv\n iwc = q * rho # kg/m2\n print(\"... iwc ...\")\n del rho\n print(\"Returning ice water content (kg/m3) for %s as %s xarray\\n\\n\"%(model, iwc.shape))\n iwcxr = xr.DataArray(iwc, dims=list(q.dims), coords=q.coords, \n attrs={'standard_name':'iwc','long_name':'ice_water_content','units':'kg/m3'})\n return iwcxr",
"def test1_UBandModelwithQBandMS(self):\n\n # The MS is in Q band, so deliberately choose the U band model so that the structure\n # is not too far off, but whether or not its flux density is scaled makes a difference.\n\n print \"Running multiple setjy with different parameters...\"\n for use_oldstandard in [False, True]:\n # for debugging ...\n #for use_oldstandard in [True]:\n selStandard = (\"Perley-Taylor 99\" if use_oldstandard else \"Perley-Butler 2010\")\n print \"!!!!! Run with standard=\\\"%s\\\" !!!!!\" % selStandard\n self.result[use_oldstandard] = self.run_setjy(use_oldstandard)\n\n \n print \"!!!! Run with standard=\\\"manual\\\", fluxdensity !!!!!\"\n self.result['fluxdens'] = self.run_setjy(False, 1234.0)\n print \"!!!! Run with standard=\\\"manual\\\", fluxdensity and spix !!!!!\"\n self.result['spix'] = self.run_setjy(False,1234.0 * (43.42064/35.0)**0.7,-0.7,\"35.0GHz\")\n\n # check on HISTORY sub-table entries - does not check for values\n \"\"\"Flux density in HISTORY (old standard)?\"\"\"\n #no scaling\n #self.check_history(self.result[True]['history'],[\"Scaling spw 1's model image to I =\"])\n if not self.ismms: self.check_history(self.result[True]['history'],[\"fld ind 12) spw 1 [I=\"])\n \"\"\"Flux density in HISTORY (new default standard)?\"\"\"\n if not self.ismms: self.check_history(self.result[False]['history'],[\"Scaling spw(s) [0, 1]'s model image to I =\"])\n #\"\"\"Flux density in HISTORY (fluxdensity)?\"\"\" <= no flux density is written in HISTORY, just input flux dens.\n #self.check_history(self.result['fluxdens']['history'],[\"Scaling spw 1's model image to I =\"])\n \"\"\"Flux density in HISTORY (spix)?\"\"\"\n #self.check_history(self.result['spix']['history'],[\"Scaling spw 1's model image to I =\"])\n if not self.ismms: self.check_history(self.result['spix']['history'],[\"Flux density as a function of frequency\"])\n\n # computed flux check\n # -different standards\n \"\"\" Returned flux density (using old standard) \"\"\"\n # fieldid = 12\n self.assertTrue(self.result[True]['setjyran'].has_key('12'))\n self.check_eq(self.result[True]['setjyran']['12']['1']['fluxd'][0],0.91134687,0.0001)\n \"\"\" Returned flux density (default standard=Perley-Butler 2010) \"\"\"\n self.assertTrue(self.result[False]['setjyran'].has_key('12'))\n #self.check_eq(self.result[False]['setjyran']['12']['1']['fluxd'][0],0.0,0.0001)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result[False]['setjyran']['12']['1']['fluxd'][0],1.0510757,0.0001)\n #\n # -manual mode (fluxdensity specification)\n \"\"\" Returned flux density (with input fluxdensity) \"\"\"\n self.assertTrue(self.result['fluxdens']['setjyran'].has_key('12'))\n self.check_eq(self.result['fluxdens']['setjyran']['12']['1']['fluxd'][0],1234.0,0.0001)\n \"\"\" Returned flux density (with input fluxdensity and spix) \"\"\"\n self.assertTrue(self.result['spix']['setjyran'].has_key('12'))\n #self.check_eq(self.result['spix']['setjyran']['12']['1']['fluxd'][0],1233.91240671,0.0001)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result['spix']['setjyran']['12']['1']['fluxd'][0],1234.0328507,0.0001)\n #\n # -for standard='Perley-Butler 2010, with model image\n \"\"\"modimage != '' and fluxdensity == 0 -> no scaling?\"\"\"\n #self.check_eq(self.result[False]['short'], 2.712631, 0.05)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result[False]['short'], 1.0508747, 0.05)\n #self.check_eq(self.result[False]['long'], 2.4080808, 
0.05)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result[False]['long'], 0.9328917, 0.05)\n #\n # -for standard='Perley-Taylor 99' (no model specification is allowed)\n \"\"\"Perley-Taylor 99 standard?\"\"\"\n self.check_eq(self.result[True]['short'], 0.911185, 0.025)\n #self.check_eq(self.result[True]['long'], 0.808885, 0.025)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result[True]['long'], 0.9114067, 0.025)\n #\"\"\"modimage != '' and fluxdensity > 0\"\"\" this is no longer supported in the task\n \"\"\"fluxdensity > 0\"\"\" # should be = input fluxdensity for model vis\n self.check_eq(self.result['fluxdens']['short'], 1234.0, 0.05)\n self.check_eq(self.result['fluxdens']['long'], 1234.0, 0.05)\n #\"\"\"modimage != '', fluxdensity > 0, and spix = -0.7\"\"\" with modimage no longer supproted\n \"\"\"fluxdensity > 0, and spix = -0.7\"\"\"\n #self.check_eq(self.result['spix']['short'], 1233.7, 0.5)\n #self.check_eq(self.result['spix']['long'], 1095.2, 0.5)\n self.check_eq(self.result['spix']['short'], 1234.0, 0.5)\n self.check_eq(self.result['spix']['long'], 1234.0, 0.5)\n\n return True",
"def __dowson_hamrock_parameters(r_eff, param_g, param_u, param_w):\n param_ehd = r_eff * param_g ** 0.53 * param_u ** 0.67 * param_w ** -0.067\n return param_ehd",
"def endurance(bsfc, Wtotal=150., Wdry=60., LD=25., CL=0.6, Pe=100., rhofuel=750., S=22.5, rho=0.770816, dt=100):\n\n S *= ft2m**2\n bsfc *= 1./sm2kgkWhr\n Wtotal *= lbs2N\n Wdry *= lbs2N\n\n W, V, Pa, Ptot, Wfuel, t = [], [], [], [], [], []\n\n V.append((2*Wtotal/rho/S/CL)**0.5)\n t.append(0)\n Pa.append(Wtotal/LD*V[-1])\n Ptot.append(Pa[-1] + Pe)\n Wfuel.append(Ptot[-1]*bsfc*dt)\n W.append(Wtotal- Wfuel[-1])\n\n i = 1\n while W[-1] >= Wdry:\n V.append((2*W[-1]/rho/S/CL)**0.5)\n t.append(dt + t[-1])\n Pa.append(W[-1]/LD*V[-1])\n Ptot.append(Pa[-1] + Pe)\n Wfuel.append(Ptot[-1]*bsfc*dt*g)\n W.append(W[-1]- Wfuel[-1])\n i += 1\n\n t, W, V, Pa = np.array(t), np.array(W), np.array(V), np.array(Pa)\n Ptot = np.array(Ptot)\n\n print \"Total Endurance: %.1f [days]\" % (t[-1]*sec2day)\n return {\"time\": {\"units\": \"days\", \"values\": t/60./60./24.},\n \"weight\": {\"units\": \"lbs\", \"values\": W/lbs2N},\n \"speed\": {\"units\": \"kts\", \"values\": V/kts2ms},\n \"flight power\": {\"units\": \"kW\", \"values\": Pa/1000.},\n \"total power\": {\"units\": \"kW\", \"values\": Ptot/1000.}}",
"def calcLivingExtMoisture(self) :\n # do basic sanity check: Live fuels present?\n if (not (LIVE in self.fuelParameters)) or \\\n (self.fuelParameters[LIVE][ONEHR].ovendryLoading == 0) :\n return\n\n self.calcWPrime()\n self.calcMPrime()\n ext = 2.9 * self.wPrime \n ext *= 1. - (self.mPrime / self.extMoisture[DEAD])\n ext -= 0.226\n\n self.setExtMoisture(LIVE, ext)",
"def numerical_HL(self, rho = 804.3, drho = 0.1, T = 218.15, accum = 0.025):\n\n rho = rho\n rhos = np.arange(self.rho_o*1000, rho, drho)\n # rhos = np.array((self.rho_o*1000, rho))\n\n sigma_o = 0.\n\n def dsigma2_dtD_upper(sig, rhos):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_deuterium = firn_diffusivity_instance.deuterium(f_factor_version = self.f_factor_deuterium)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n dsigma2dt = 2./drho_dt*firn_diffusivity_deuterium - 2./rhos*sig\n return dsigma2dt\n\n\n def dsigma2_dt18_upper(sig, rhos):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_o18 = firn_diffusivity_instance.o18(f_factor_version = self.f_factor_o18)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n dsigma2dt = 2./drho_dt*firn_diffusivity_o18 - 2./rhos*sig\n return dsigma2dt\n\n\n def dsigma2_dt17_upper(sig, rhos):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_o17 = firn_diffusivity_instance.o17(f_factor_version = self.f_factor_o17)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n dsigma2dt = 2./drho_dt*firn_diffusivity_o17 - 2./rhos*sig\n return dsigma2dt\n\n\n\n def dsigma2_dtD_lower(sig, rhos, accum):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_deuterium = firn_diffusivity_instance.deuterium(f_factor_version = self.f_factor_deuterium)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n # drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n drho_dt = 1000*self.f1*575*np.exp(-21400/(self.R*T))*np.sqrt(accum)*(self.rho_i - rhos/1000.)\n dsigma2dt = 2./drho_dt*firn_diffusivity_deuterium - 2./rhos*sig\n return dsigma2dt\n\n\n def dsigma2_dt18_lower(sig, rhos, accum):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_o18 = firn_diffusivity_instance.o18(f_factor_version = self.f_factor_o18)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n # drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n drho_dt = 1000*self.f1*575*np.exp(-21400/(self.R*T))*np.sqrt(accum)*(self.rho_i - rhos/1000.)\n dsigma2dt = 2./drho_dt*firn_diffusivity_o18 - 2./rhos*sig\n return dsigma2dt\n\n\n def dsigma2_dt17_lower(sig, rhos, accum):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_o17 = firn_diffusivity_instance.o17(f_factor_version = self.f_factor_o17)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n # drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n drho_dt = 1000*self.f1*575*np.exp(-21400/(self.R*T))*np.sqrt(accum)*(self.rho_i - rhos/1000.)\n dsigma2dt = 2./drho_dt*firn_diffusivity_o17 - 2./rhos*sig\n return dsigma2dt\n\n\n if rho<=550.:\n rhos = np.arange(self.rho_o*1000, rho, drho)\n sigma2_deuterium = sp.integrate.odeint(dsigma2_dtD_upper, 0, rhos)\n sigma2_18 = sp.integrate.odeint(dsigma2_dt18_upper, 0, rhos)\n sigma2_17 = sp.integrate.odeint(dsigma2_dt17_upper, 
0, rhos)\n\n elif rho>550.:\n rhos = np.array((self.rho_o*1000, 550.))\n sigma2_deuterium_cr = sp.integrate.odeint(dsigma2_dtD_upper, 0., rhos)[1]\n sigma2_18_cr = sp.integrate.odeint(dsigma2_dt18_upper, 0., rhos)[1]\n sigma2_17_cr = sp.integrate.odeint(dsigma2_dt17_upper, 0., rhos)[1]\n\n rhos = np.arange(550., rho, drho)\n sigma2_deuterium = sp.integrate.odeint(dsigma2_dtD_lower, sigma2_deuterium_cr, rhos, args = (accum,))\n sigma2_18 = sp.integrate.odeint(dsigma2_dt18_lower, sigma2_18_cr, rhos, args = (accum,))\n sigma2_17 = sp.integrate.odeint(dsigma2_dt17_lower, sigma2_17_cr, rhos, args = (accum,))\n\n\n\n\n\n return rhos, np.sqrt(sigma2_deuterium), np.sqrt(sigma2_18), np.sqrt(sigma2_17)",
"def wq_from_file(self, water_quality_raw_data):",
"def rain_heat_flux_FLAWED(rain_rate, tC_sea, tC_air, relhum, pr_air, dter, dqer, dsea):\n Rgas = 287.05 # gas constant [J/kg/K] for dry(!) air\n c2k = 273.15 # celsius to kelvin temperature constant\n cpa = 1004.67 # specific heat capacity of (dry) air [J/kg/K]\n cpw = 4000.0 # specific heat capacity of sw at T=20 degC, S=35 [J/kg/K]\n\n rhoa = air_density(tC_air, pr_air, relhum) # units kg/m^3\n Le = latent_heat_vaporization_pure_water(tC_sea + dsea) # units J/kg\n Qair = met_spechum(tC_air, pr_air, relhum)/1000.0 # units kg/kg\n Qsea = sea_spechum(tC_sea + dsea, pr_air)/1000.0 # units kg/kg\n\n dwat = 2.11e-5 * ((tC_air + c2k) / c2k) ** 1.94 # water vapour diffusivity\n\n dtmp = (1.0 + 3.309e-3 * tC_air -\n 1.44e-6 * tC_air * tC_air) * 0.02411 / (rhoa * cpa) # heat diffusivity\n\n # in Clausius-Clayperon eqn, whoi (DPS) and fortran uses tC_sea (+ dsea);\n # jim edson and archived whoi rain_flux.m use tC_air;\n # some versions include dter in this expression\n\n # this expression affects *most* of the unit tests!\n # this is because rain_heat_flux is used in the warmlayer algorithm, so\n # any data product which uses warmmlayer will fail if a switch is made.\n\n #dqs_dt = Qair * Le / (Rgas * (tC_air + c2k) ** 2) # Clausius-Clapeyron\n dqs_dt = Qsea * Le / (Rgas * (tC_sea + dsea + c2k) ** 2) # Clausius-Clapeyron\n\n alfac = 1.0 / (1.0 + 0.622 * (dqs_dt * Le * dwat) / (cpa * dtmp)) # wet bulb factor\n\n factor = rain_rate * alfac * cpw / 3600.0\n rain_heat_flux = factor * ((tC_sea + dsea - tC_air - dter) +\n (Qsea - Qair - dqer) * Le / cpa)\n\n return rain_heat_flux",
"def frame_skyres(outfil, frame, skymodel, qaframe, quick_look=False):\n from desispec.sky import subtract_sky\n log = get_logger()\n\n # Access metrics\n '''\n wavg_ivar = np.sum(res_ivar,0)\n chi2_wavg = np.sum(wavg_res**2 * wavg_ivar)\n dof_wavg = np.sum(wavg_ivar > 0.)\n pchi2_wavg = scipy.stats.distributions.chi2.sf(chi2_wavg, dof_wavg)\n chi2_med = np.sum(med_res**2 * wavg_ivar)\n pchi2_med = scipy.stats.distributions.chi2.sf(chi2_med, dof_wavg)\n '''\n skyfibers = np.array(qaframe.qa_data['SKYSUB'][\"METRICS\"][\"SKYFIBERID\"])\n subtract_sky(frame, skymodel)\n res=frame.flux[skyfibers]\n res_ivar=frame.ivar[skyfibers]\n if quick_look:\n med_res = qaframe.qa_data['SKYSUB'][\"METRICS\"][\"MED_RESID_WAVE\"]\n wavg_res = qaframe.qa_data['SKYSUB'][\"METRICS\"][\"WAVG_RES_WAVE\"]\n else:\n med_res = np.median(res,axis=0)\n wavg_res = np.sum(res*res_ivar,0) / (np.sum(res_ivar,0) + (np.sum(res_ivar,0)==0))\n\n # Plot\n if quick_look:\n fig = plt.figure(figsize=(8, 10.0))\n gs = gridspec.GridSpec(4,2)\n else:\n fig = plt.figure(figsize=(8, 6.0))\n gs = gridspec.GridSpec(2,2)\n xmin,xmax = np.min(frame.wave), np.max(frame.wave)\n\n # Simple residual plot\n ax0 = plt.subplot(gs[0,:])\n ax0.plot(frame.wave, med_res, label='Median Res')\n ax0.plot(frame.wave, signal.medfilt(med_res,51), color='black', label='Median**2 Res')\n ax0.plot(frame.wave, signal.medfilt(wavg_res,51), color='red', label='Med WAvgRes')\n\n #\n ax0.plot([xmin,xmax], [0., 0], '--', color='gray')\n ax0.plot([xmin,xmax], [0., 0], '--', color='gray')\n ax0.set_xlabel('Wavelength')\n ax0.set_ylabel('Sky Residuals (Counts)')\n ax0.set_xlim(xmin,xmax)\n ax0.set_xlabel('Wavelength')\n ax0.set_ylabel('Sky Residuals (Counts)')\n ax0.set_xlim(xmin,xmax)\n med0 = np.maximum(np.abs(np.median(med_res)), 1.)\n ax0.set_ylim(-5.*med0, 5.*med0)\n #ax0.text(0.5, 0.85, 'Sky Meanspec',\n # transform=ax_flux.transAxes, ha='center')\n\n # Legend\n legend = ax0.legend(loc='upper right', borderpad=0.3,\n handletextpad=0.3, fontsize='small')\n\n # Histogram of all residuals\n ax1 = plt.subplot(gs[1,0])\n xmin,xmax = -5., 5.\n\n # Histogram\n binsz = qaframe.qa_data['SKYSUB'][\"PARAMS\"][\"BIN_SZ\"]\n if 'DEVS_1D' in qaframe.qa_data['SKYSUB'][\"METRICS\"].keys(): # Online\n hist = np.asarray(qaframe.qa_data['SKYSUB'][\"METRICS\"][\"DEVS_1D\"])\n edges = np.asarray(qaframe.qa_data['SKYSUB'][\"METRICS\"][\"DEVS_EDGES\"])\n else: # Generate for offline\n gd_res = res_ivar > 0.\n if not np.any(gd_res):\n log.info(\"No good residuals in frame_skyres plot\")\n edges = None\n else:\n devs = res[gd_res] * np.sqrt(res_ivar[gd_res])\n min_devs = np.maximum(np.min(devs), xmin*2)\n max_devs = np.minimum(np.max(devs), xmax*2)\n i0, i1 = int(min_devs/binsz) - 1, int(max_devs/binsz) + 1\n rng = tuple( binsz*np.array([i0,i1]) )\n nbin = i1-i0\n hist, edges = np.histogram(devs, range=rng, bins=nbin)\n\n if edges is not None:\n xhist = (edges[1:] + edges[:-1])/2.\n ax1.hist(xhist, color='blue', bins=edges, weights=hist)#, histtype='step')\n # PDF for Gaussian\n area = binsz * np.sum(hist)\n xppf = np.linspace(scipy.stats.norm.ppf(0.0001), scipy.stats.norm.ppf(0.9999), 100)\n ax1.plot(xppf, area*scipy.stats.norm.pdf(xppf), 'r-', alpha=1.0)\n\n ax1.set_xlabel(r'Res/$\\sigma$')\n ax1.set_ylabel('N')\n ax1.set_xlim(xmin,xmax)\n\n # Meta text\n #- limit the dictionary to residuals only for meta\n qaresid=copy.deepcopy(qaframe)\n resid_keys=['NREJ','NSKY_FIB','NBAD_PCHI','MED_RESID','RESID_PER']\n qaresid.qa_data['SKYSUB']['METRICS']={key:value for key,value in 
qaframe.qa_data['SKYSUB']\n ['METRICS'].items() if key in resid_keys}\n\n ax2 = plt.subplot(gs[1,1])\n ax2.set_axis_off()\n show_meta(ax2, qaresid, 'SKYSUB', outfil)\n\n if quick_look:\n #- SNR Plot\n elg_snr_mag = qaframe.qa_data['SKYSUB'][\"METRICS\"][\"ELG_SNR_MAG\"]\n lrg_snr_mag = qaframe.qa_data['SKYSUB'][\"METRICS\"][\"LRG_SNR_MAG\"]\n qso_snr_mag = qaframe.qa_data['SKYSUB'][\"METRICS\"][\"QSO_SNR_MAG\"]\n star_snr_mag = qaframe.qa_data['SKYSUB'][\"METRICS\"][\"STAR_SNR_MAG\"]\n\n ax3 = plt.subplot(gs[2,0])\n ax4 = plt.subplot(gs[2,1])\n ax5 = plt.subplot(gs[3,0])\n ax6 = plt.subplot(gs[3,1])\n\n ax3.set_ylabel(r'Median S/N')\n ax3.set_xlabel('')\n ax3.set_title(r'ELG')\n if len(elg_snr_mag[1]) > 0: #- at least 1 elg fiber?\n select=np.where((elg_snr_mag[1] != np.array(None)) & (~np.isnan(elg_snr_mag[1])) & (np.abs(elg_snr_mag[1])!=np.inf))[0] #- Remove None, nan and inf values in mag\n if select.shape[0]>0:\n\n xmin=np.min(elg_snr_mag[1][select])-0.1\n xmax=np.max(elg_snr_mag[1][select])+0.1\n ax3.set_xlim(xmin,xmax)\n ax3.set_ylim(np.min(elg_snr_mag[0][select])-0.1,np.max(elg_snr_mag[0][select])+0.1)\n ax3.xaxis.set_ticks(np.arange(int(np.min(elg_snr_mag[1][select])),int(np.max(elg_snr_mag[1][select]))+1,0.5))\n ax3.tick_params(axis='x',labelsize=10,labelbottom='on')\n ax3.tick_params(axis='y',labelsize=10,labelleft='on')\n ax3.plot(elg_snr_mag[1][select],elg_snr_mag[0][select],'b.')\n\n ax4.set_ylabel('')\n ax4.set_xlabel('')\n ax4.set_title(r'LRG')\n if len(lrg_snr_mag[1]) > 0: #- at least 1 lrg fiber?\n select=np.where((lrg_snr_mag[1] != np.array(None)) & (~np.isnan(lrg_snr_mag[1])) & (np.abs(lrg_snr_mag[1])!=np.inf))[0]\n if select.shape[0]>0:\n xmin=np.min(lrg_snr_mag[1][select])-0.1\n xmax=np.max(lrg_snr_mag[1][select])+0.1\n ax4.set_xlim(xmin,xmax)\n ax4.set_ylim(np.min(lrg_snr_mag[0][select])-0.1,np.max(lrg_snr_mag[0][select])+0.1)\n ax4.xaxis.set_ticks(np.arange(int(np.min(lrg_snr_mag[1][select])),int(np.max(lrg_snr_mag[1][select]))+1,0.5))\n ax4.tick_params(axis='x',labelsize=10,labelbottom='on')\n ax4.tick_params(axis='y',labelsize=10,labelleft='on')\n ax4.plot(lrg_snr_mag[1][select],lrg_snr_mag[0][select],'r.')\n\n ax5.set_ylabel(r'Median S/N')\n ax5.set_xlabel(r'Mag. (DECAM_R)')\n ax5.set_title(r'QSO')\n if len(qso_snr_mag[1]) > 0: #- at least 1 qso fiber?\n select=np.where((qso_snr_mag[1] != np.array(None)) & (~np.isnan(qso_snr_mag[1])) & (np.abs(qso_snr_mag[1])!=np.inf))[0] #- Remove None, nan and inf values\n if select.shape[0]>0:\n\n xmin=np.min(qso_snr_mag[1][select])-0.1\n xmax=np.max(qso_snr_mag[1][select])+0.1\n ax5.set_xlim(xmin,xmax)\n ax5.set_ylim(np.min(qso_snr_mag[0][select])-0.1,np.max(qso_snr_mag[0][select])+0.1)\n ax5.xaxis.set_ticks(np.arange(int(np.min(qso_snr_mag[1][select])),int(np.max(qso_snr_mag[1][select]))+1,1.0))\n ax5.tick_params(axis='x',labelsize=10,labelbottom='on')\n ax5.tick_params(axis='y',labelsize=10,labelleft='on')\n ax5.plot(qso_snr_mag[1][select],qso_snr_mag[0][select],'g.')\n\n ax6.set_ylabel('')\n ax6.set_xlabel('Mag. 
(DECAM_R)')\n ax6.set_title(r'STD')\n if len(star_snr_mag[1]) > 0: #- at least 1 std fiber?\n select=np.where((star_snr_mag[1] != np.array(None)) & (~np.isnan(star_snr_mag[1])) & (np.abs(star_snr_mag[1])!=np.inf))[0]\n if select.shape[0]>0:\n xmin=np.min(star_snr_mag[1][select])-0.1\n xmax=np.max(star_snr_mag[1][select])+0.1\n ax6.set_xlim(xmin,xmax)\n ax6.set_ylim(np.min(star_snr_mag[0][select])-0.1,np.max(star_snr_mag[0][select])+0.1)\n ax6.xaxis.set_ticks(np.arange(int(np.min(star_snr_mag[1][select])),int(np.max(star_snr_mag[1][select]))+1,0.5))\n ax6.tick_params(axis='x',labelsize=10,labelbottom='on')\n ax6.tick_params(axis='y',labelsize=10,labelleft='on')\n ax6.plot(star_snr_mag[1][select],star_snr_mag[0][select],'k.')\n\n \"\"\"\n # Meta\n xlbl = 0.1\n ylbl = 0.85\n i0 = outfil.rfind('/')\n ax2.text(xlbl, ylbl, outfil[i0+1:], color='black', transform=ax2.transAxes, ha='left')\n yoff=0.15\n for key in sorted(qaframe.data['SKYSUB']['METRICS'].keys()):\n if key in ['QA_FIG']:\n continue\n # Show\n ylbl -= yoff\n ax2.text(xlbl+0.1, ylbl, key+': '+str(qaframe.data['SKYSUB']['METRICS'][key]),\n transform=ax2.transAxes, ha='left', fontsize='small')\n \"\"\"\n\n\n '''\n # Residuals\n scatt_sz = 0.5\n ax_res = plt.subplot(gs[1])\n ax_res.get_xaxis().set_ticks([]) # Suppress labeling\n res = (sky_model - (true_flux*scl))/(true_flux*scl)\n rms = np.sqrt(np.sum(res**2)/len(res))\n #ax_res.set_ylim(-3.*rms, 3.*rms)\n ax_res.set_ylim(-2, 2)\n ax_res.set_ylabel('Frac Res')\n # Error\n #ax_res.plot(true_wave, 2.*ms_sig/sky_model, color='red')\n ax_res.scatter(wave,res, marker='o',s=scatt_sz)\n ax_res.plot([xmin,xmax], [0.,0], 'g-')\n ax_res.set_xlim(xmin,xmax)\n\n # Relative to error\n ax_sig = plt.subplot(gs[2])\n ax_sig.set_xlabel('Wavelength')\n sig_res = (sky_model - (true_flux*scl))/sky_sig\n ax_sig.scatter(wave, sig_res, marker='o',s=scatt_sz)\n ax_sig.set_ylabel(r'Res $\\delta/\\sigma$')\n ax_sig.set_ylim(-5., 5.)\n ax_sig.plot([xmin,xmax], [0.,0], 'g-')\n ax_sig.set_xlim(xmin,xmax)\n '''\n\n # Finish\n plt.tight_layout(pad=0.1,h_pad=0.0,w_pad=0.0)\n outfile = makepath(outfil)\n plt.savefig(outfil)\n plt.close()\n print('Wrote QA SkyRes file: {:s}'.format(outfil))",
"def Y2W(r, Y, mode, F): #im ana, and i want to make some mess in my boyfriend's code :)\n\n [h, vr] = Y\n Fphi, Fz = F(r)[2:]\n\n kappa = mode.disk.kappa(r)\n Omega = mode.disk.Omega(r)\n Omegav = mode.disk.Omegav(r)\n dkappa = mode.disk.dkappa(r)\n c = mode.disk.cs\n \n m, n = mode.m, mode.n\n omegat = mode.omegat(r)\n \n [h, vr] = Y\n vphi = -(-2*h*m*Omega + 1j*(-2*Fphi*Omega*r**2 + kappa**2*r*vr))/(2.*Omega*omegat*r**2) \n vz = 1j*(c*Fz - h*n*Omegav)/(c*omegat) \t \n \n # solution vector:\n W = np.array([h, vr, vphi, vz])\n \n # derivatives of h and vr are calculated by calling ode_rhs:\n [dh, dvr] = ode_rhs(r, Y, mode, F)\n\n # derivative of the force:\n dFphi, dFz = F.der(r)[2:]\n \n # derivatives of the two other velocities are: \n \n dvphi = (-(-2*dh*m*Omega - 2*h*m*(kappa**2/(2.*Omega*r) - (2*Omega)/r) + \n 1j*(dvr*kappa**2*r - 4*Fphi*Omega*r - 2*dFphi*Omega*r**2 - \n 2*Fphi*(kappa**2/(2.*Omega*r) - (2*Omega)/r)*r**2 + kappa**2*vr + 2*dkappa*kappa*r*vr))/\n (2.*Omega*omegat*r**2) + (-2*h*m*Omega + 1j*(-2*Fphi*Omega*r**2 + kappa**2*r*vr))/\n (Omega*omegat*r**3) - (m*(kappa**2/(2.*Omega*r) - (2*Omega)/r)*\n (-2*h*m*Omega + 1j*(-2*Fphi*Omega*r**2 + kappa**2*r*vr)))/(2.*Omega*omegat**2*r**2) + \n ((kappa**2/(2.*Omega*r) - (2*Omega)/r)*(-2*h*m*Omega + 1j*(-2*Fphi*Omega*r**2 + kappa**2*r*vr)))/\n (2.*Omega**2*omegat*r**2))\n\n dvz = (0.5j*(c*(Fz*m*(kappa**2 - 4*Omega**2) + 2*dFz*Omega*omegat*r) - \n n*Omegav*(h*m*(kappa**2 - 4*Omega**2) + 2*dh*Omega*omegat*r)))/(c*Omega*omegat**2*r)\n \n dW =np.array([dh, dvr, dvphi, dvz])\n \n return [W, dW]",
"def calculate_soil_water_fac(self):\n # turn into fraction...\n smc_topsoil = self.state.pawater_topsoil / self.params.wcapac_topsoil\n smc_root = self.state.pawater_root / self.params.wcapac_root\n \n if self.control.sw_stress_model == 0:\n wtfac_topsoil = smc_topsoil**self.params.qs \n wtfac_root = smc_root**self.params.qs \n \n elif self.control.sw_stress_model == 1:\n wtfac_topsoil = self.calc_sw_modifier(smc_topsoil, \n self.params.ctheta_topsoil, \n self.params.ntheta_topsoil)\n \n wtfac_root = self.calc_sw_modifier(smc_root, \n self.params.ctheta_root, \n self.params.ntheta_root)\n \n elif self.control.sw_stress_model == 2:\n \n # Stomatal limitaiton\n # Exponetial function to reduce g1 with soil water limitation\n # based on Zhou et al. 2013, AFM, following Makela et al 1996.\n # For the moment I have hardwired the PFT parameter as I am still\n # testing.\n # Because the model is a daily model we are assuming that LWP is\n # well approximated by the night SWP.\n \n if float_eq(smc_topsoil, 0.0):\n psi_swp_topsoil = -1.5\n else:\n arg1 = self.params.psi_sat_topsoil\n arg2 = smc_topsoil /self.params.theta_sat_topsoil\n arg3 = -self.params.b_topsoil\n psi_swp_topsoil = arg1 * arg2**arg3\n \n if float_eq(smc_root, 0.0):\n psi_swp_root = -1.5\n else:\n arg1 = self.params.psi_sat_root\n arg2 = smc_root/self.params.theta_sat_root\n arg3 = -self.params.b_root\n psi_swp_root = arg1 * arg2**arg3\n \n # multipliy these by g1, same as eqn 3 in Zhou et al. 2013.\n b = 0.66\n \n wtfac_topsoil = exp(b * psi_swp_topsoil)\n wtfac_root = exp(b * psi_swp_root)\n \n #print self.state.pawater_root,wtfac_root \n return (wtfac_topsoil, wtfac_root)",
"def process(self, plotcheck=False):\n #wave, templateflux, nly = self.read_template()\n wave, templateflux = self.read_template()\n\n # apply ISM extinction\n ism = ISMExtinction()\n if self.ext_law == 0:\n # Calzetti et al. 2000\n reddenedflux = ism.calz(wave, templateflux, self.ebv)\n self.extstr = 'calz'\n elif self.ext_law == 1:\n # Cardelli, Clayton & Mathis (CCM) 1989 - clumpy ISM\n # the optical depth of the clumps as a function of wavelength\n # will follow the CCM extinction law\n\n# # following Natta & Panagia 1984, ApJ, 287, 228\n# # I_obs/I_init = exp(-nclumps*(1-exp(-tau_clumps))) = exp(-tau_eff)\n# # calculate effective optical depth in V band\n# tauv_eff = self.nclumps * (1. - np.exp(-self.tauv_clumps))\n# # E_{B-V} = AV / RV = (1.086 * tauv_eff) / RV, RV = 3.1\n# ebv_eff = (1.086 * tauv_eff) / 3.1\n \n taulam_clump = ism.ccm(wave, self.tauv_clump)\n # redden according to Natta & Panagia 1984, Scarlata et al. 2009\n factor = -self.nclumps * (1. - np.exp(-taulam_clump))\n reddenedflux = templateflux * np.exp(factor)\n self.extstr = 'ccm'\n\n # IGM attenuation from Inoue et al.\n igm = IGMAttenuation()\n ext = igm.inoue(wave, self.z)\n extflux = reddenedflux * ext\n \n # normalize spectrum to wavelength at which LF is calculated\n flux = self.lum / (4. * np.pi * self.dlum**2 * (1.+self.z))\n # scale full template SED so that the flux at lambda=self.lf.mwave\n # is the flux randomly assigned from the cumulative LF\n\n # find flux at LF wavelength\n # taken an average in a bin +/- 50 A from center\n wlam = self.find_thing(wave, self.lf.mwave)\n wlam1 = self.find_thing(wave, self.lf.mwave-250.)\n wlam2 = self.find_thing(wave, self.lf.mwave+250.)\n avgflux = np.mean(extflux[wlam1:wlam2])\n \n # rescale\n scale = flux / avgflux\n outflux = extflux * scale\n\n if plotcheck:\n # input spectrum\n plt.plot(wave, templateflux)\n plt.xlim(0,30000)\n # reddened and attenuated spectrum\n plt.plot(wave, reddenedflux, '#ffa500')\n plt.plot(wave, extflux, 'r')\n # wavelength range used for flux normalization\n plt.vlines([wave[wlam], wave[wlam1], wave[wlam2]], 0, 1.e-20)\n plt.hlines(avgflux, wave[wlam1], wave[wlam2])\n # output spectrum\n plt.plot(wave, outflux, 'r--')\n \n return wave, outflux",
"def expsfh(tq, tau, time):\n ssfr = 2.5*(((10**10.27)/1E10)**(-0.1))*(time/3.5)**(-2.2)\n c = np.apply_along_axis(lambda a: a.searchsorted(3.0), axis = 0, arr = time) \n ssfr[:c.flatten()[0]] = np.interp(3.0, time.flatten(), ssfr.flatten())\n c_sfr = np.interp(tq, time.flatten(), ssfr.flatten())*(1E10)/(1E9)\n ### definition is for 10^10 M_solar galaxies and per gyr - convert to M_solar/year ###\n sfr = np.ones_like(time)*c_sfr\n mask = time <= tq\n sfrs = np.ma.masked_array(sfr, mask=mask)\n times = np.ma.masked_array(time-tq, mask=mask)\n sfh = sfrs*np.exp(-times/tau)\n return sfh.data",
"def weather_param(file, epsilon, Format, S):\n df_weather = weather_data_to_df(file, S['Period_start'], S['Period_end'], S['Time_step'])\n df_weather.drop(df_weather.tail(1).index, inplace=True)\n \n # External temperature - format Ext_T[Day_index,Hour_index]\n Ext_T = reshape_day_hour((df_weather['Temperature'].values), *Format)\n Ext_T[np.abs(Ext_T) < epsilon] = 0\n \n # Global irradiance\n Irradiance = reshape_day_hour((df_weather['Irradiance'].values), *Format)\n Irradiance[np.abs(Irradiance) < epsilon] = 0\n \n return Ext_T, Irradiance, df_weather.index",
"def spectral_fwhm(self):\n wave = self.central_wavelength\n return wave / self.info.instrument.spectral_resolution",
"def energy_inf(self):\n\n if self.x**2 != 1:\n print('Off axis orbits are not currently supported.')\n elif self.harmonic is None:\n print('Unable to compute energy values without setting modes.')\n # else:\n # self.harmonic_key = tuple((self.harmonic['ell'], self.harmonic['em']))\n # self.mode_key = tuple((self.mode['kay'], self.mode['en']))\n # print(self.harmonic_key)\n # print(self.mode_key)\n # if self.harmonic_key in self.mode_content.keys():\n # if self.mode_key in self.mode_content[self.harmonic_key].keys():\n # print('Mode has already been computed and has been skipped.')\n # TODO (aaron): add some logic to skip modes that have already been computed\n else:\n self.omega = fp_find_omega(self.omega_r, self.omega_theta, self.omega_phi, self.em, self.kay, self.en)\n self.re_nu, self.im_nu = calc_nu(self.aa, self.slr, self.ecc, self.x, self.ell, self.en, self.em, self.kay)\n\n self.eigen, self.Slm, self.Slmd, self.Slmdd = calc_swsh_eq(self.aa, self.omega, self.ell, self.em, -2)\n\n self.nu = self.re_nu + 1j * self.im_nu\n self.Bin = py_find_Bin(self.re_nu, self.im_nu, self.eigen, self.aa, self.omega, self.em)\n\n self.mode_dependent = {'gw_freq':self.omega, 'eigen':self.eigen, 'nu':self.nu, 'Bin':self.Bin}\n\n self.e_inf, self.Z = flux_inf(self.nu, self.Bin, self.eigen, self.slr, self.ecc, self.aa, self.ups_r, self.ups_theta,\n self.ups_phi, self.gamma, self.omega, self.em, self.Lz, self.En, self.Slm, self.Slmd,\n self.Slmdd, self.omega_r, self.r1, self.r2, self.r3, self.r4, self.zp, self.zm)\n if np.isnan(self.e_inf): # TODO (aaron): investigate why this can be nan\n print('Some value is NaN - this needs to be investigated.')\n self.e_inf = 0\n print('Energy at infinity stored as zero:', self.e_inf)\n\n elif self.double:\n self.e_inf = 2 * self.e_inf\n print('Energy at infinity:', self.e_inf)\n\n # put everything in a dict to save later as a json/hdf5\n self.harmonic_key = tuple((self.harmonic['ell'], self.harmonic['em']))\n self.mode_key = tuple((self.mode['kay'], self.mode['en']))\n\n if self.harmonic_key in self.mode_content.keys():\n self.mode_content[self.harmonic_key][self.mode_key] = self.e_inf\n else:\n self.mode_content[self.harmonic_key] = {self.mode_key: self.e_inf}",
"def init_galactic_extinction(self, MW_EBV=0., R_V=utils.MW_RV):\n self.MW_F99 = None\n if MW_EBV > 0:\n self.MW_F99 = utils.MW_F99(MW_EBV*R_V, r_v=R_V)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read in the f_RH data from the netCDF file. EW 21/02/17
|
def read_f_RH(mod_time, ceil_lam):
# file name and path
if sys.platform == 'linux2':
miedir = '/data/jcmm1/ewarren/Mie/'
else:
miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'
filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'
# read data
# f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])
f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])
return f_RH
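A hypothetical indexing example for the dictionary returned above, with synthetic arrays standing in for the netCDF contents (the keys 'RH', 'f(RH) MURK' and 'radii_range' come from the netCDF_read call; the (month, radii, RH) ordering follows the comments in calc_Q_ext_wet; neither the real file nor its array sizes are assumed):

import numpy as np

# stand-in for the netCDF_read output
f_RH = {'RH': np.linspace(0.0, 1.0, 101),                  # RH as a fraction
        'radii_range': np.linspace(10.0, 1000.0, 40),      # geometric radius [nm]
        'f(RH) MURK': np.ones((12, 40, 101))}              # (month, radii, RH)

month_idx = 6                                              # July, 0-based
r_idx = int(np.abs(f_RH['radii_range'] - 150.0).argmin())  # nearest radius to 150 nm
rh_idx = int(np.abs(f_RH['RH'] - 0.8).argmin())            # nearest RH to 0.8
print(f_RH['f(RH) MURK'][month_idx, r_idx, rh_idx])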
|
[
"def read_hourly_f_RH(mod_time, ceil_lam):\n\n import sys.platform as platform\n\n # file name and path\n if platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename)\n return f_RH",
"def read_ncep(ncdf_path,year):\r\n\r\n # path to the netcdf files\r\n ncdf_AT_file = os.path.join(ncdf_path,'.'.join(['air','{:0>4}'.format(year),'nc']))\r\n ncdf_GH_file = os.path.join(ncdf_path,'.'.join(['hgt','{:0>4}'.format(year),'nc']))\r\n ncdf_SH_file = os.path.join(ncdf_path,'.'.join(['shum','{:0>4}'.format(year),'nc']))\r\n\r\n print('Read global',year,'NCEP data ...')\r\n # Air Temperature\r\n DATA = read_data(netCDF4.Dataset(ncdf_AT_file,'r'), ['air'])\r\n if len(DATA['air']) < 17:\r\n print('Need 17 levels of AT data: found only ',len(lev_AT))\r\n\r\n # Specific Humidity\r\n SHUM_DATA = read_data(netCDF4.Dataset(ncdf_SH_file,'r'), ['shum'])\r\n if len(SHUM_DATA['level']) < 8:\r\n print('Need 8 levels of SH data: found only ',len(lev_SH))\r\n\r\n if list(SHUM_DATA['level'])!=list(DATA['level'][:len(SHUM_DATA['level'])]):\r\n print('Warning: air and shum do not share the same lower pressure levels')\r\n\r\n DATA.update(SHUM_DATA)\r\n\r\n # Geopotential Height\r\n GH_DATA = read_data(netCDF4.Dataset(ncdf_GH_file,'r'), ['hgt'])\r\n if len(GH_DATA['level']) < 17:\r\n print('Need 17 levels of GH data: found only ',len(lev_GH))\r\n\r\n DATA.update(GH_DATA)\r\n\r\n for key in DATA:\r\n if 'air' in key:\r\n DATA[key.replace('air','T')] = DATA[key]\r\n del DATA[key]\r\n if 'hgt' in key:\r\n DATA[key.replace('hgt','H')] = DATA[key]\r\n del DATA[key]\r\n if 'shum' in key:\r\n DATA[key.replace('shum','QV')] = DATA[key]\r\n del DATA[key]\r\n\r\n DATA['lev'] = DATA['level']\r\n del DATA['level']\r\n\r\n return DATA",
"def read_f_RH(ceil_lam):\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH",
"def read(self):\n f = netCDF4.Dataset(self.path2ncfile)\n print(f) # similar to ncdump -h\n \n# Access a netcdf variables:\n# variable objects stored by name in variables dict.\n# print the variable yields summary info (including all the attributes).\n# no actual data read yet (just have a reference to the variable object with metadata).\n\n print(f.variables.keys()) # get all variable names\n #band1var = f.variables['band1'] # temperature variable\n band1var = f.variables['time'] # temperature variable\n print(band1var) \n print(band1var[:])\n\n self.showinfo(f, 'time') \n self.showinfo(f, 'longitude') \n self.showinfo(f, 'latitude') \n #self.showinfo(f,'extra_metadata')",
"def read_calfits( f ):\n hdu = pf.open( f )\n wlmin = hdu[0].header['crval1']\n res = hdu[0].header['cdelt1']\n length = hdu[0].data.shape[-1]\n wl = np.arange( wlmin, wlmin+res*length, res )\n fl = hdu[0].data[0]\n er = hdu[0].data[1]\n return wl,fl,er",
"def read_obs_hmv_declination(obscode, year_st, year_fn, folder):\n\n OBSY = obscode.upper()\n obsy = obscode.lower()\n # Read in the observatory data one year file at a time and construct filenames\n datareq = pd.DataFrame()\n for year in range(year_st, year_fn+1):\n ystr = str(year)\n file = obsy + ystr + 'dhor.hor'\n fpf = folder + '/' + file\n tmp = IAGA2002_Data_Reader(fpf)\n tmp.columns = [col.strip(OBSY) for col in tmp.columns]\n tmp = tmp.replace(99999.00, np.nan)\n # Calculate D (in degrees) if not given in the file\n if('D' not in tmp.columns):\n dvals, hvals, ivalsm, fvals = xyz2dhif(tmp['X'], tmp['Y'], tmp['Z'])\n tmp.insert(loc=1, column='D', value=dvals.values)\n else:\n # Convert the reported values to degrees\n tmp['D'] = tmp.D.values/60.0\n datareq = datareq.append(tmp[['D']])\n return(datareq)",
"def read_wxt_obs(day, time, z):\n\n filepath = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \\\n 'Davis_BGH_' + day.strftime('%Y') + '_15min.nc'\n wxt_obs = eu.netCDF_read(filepath, vars=['time', 'RH', 'Tair', 'press'])\n\n # extract out RH obs to match mod_time\n # pull out ALL the nearest time idxs and differences\n # the mod_data time is the same for all sites so can therefore use any site\n t_idx = np.array([eu.nearest(wxt_obs['time'], t)[1] for t in time])\n t_diff = np.array([eu.nearest(wxt_obs['time'], t)[2] for t in time])\n\n wxt_obs['RH'] = wxt_obs['RH'][t_idx] # [%]\n wxt_obs['Tair'] = wxt_obs['Tair'][t_idx] # [degC]\n wxt_obs['press'] = wxt_obs['press'][t_idx] # [hPa]\n wxt_obs['time'] = wxt_obs['time'][t_idx]\n # wxt_obs['rawtime'] = wxt_obs['rawtime'][t_idx]\n\n # overwrite t_idx locations where t_diff is too high with nans\n # only keep t_idx values where the difference is below 1 hour\n bad = np.array([abs(i.days * 86400 + i.seconds) > 60 * 60 for i in t_diff])\n\n wxt_obs['RH'][bad] = np.nan\n wxt_obs['Tair'][bad] = np.nan\n wxt_obs['press'][bad] = np.nan\n\n wxt_obs['time'][bad] = np.nan\n # wxt_obs['rawtime'][bad] = np.nan\n\n # create RH_frac using RH data\n wxt_obs['RH_frac'] = wxt_obs['RH'] / 100.0\n\n # calculate extra variables\n e_s_hpa = 6.112 * (np.exp((17.67 * wxt_obs['Tair']) / (wxt_obs['Tair'] + 243.5))) # [hPa] # sat. v. pressure\n e_s = e_s_hpa * 100.0 # [Pa] # sat. v. pressure\n wxt_obs['e'] = wxt_obs['RH_frac'] * e_s # [Pa] # v. pressure\n wxt_obs['r_v'] = wxt_obs['e'] / (1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) # water_vapour mixing ratio [kg kg-1]\n wxt_obs['q'] = wxt_obs['e'] / ((1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) + wxt_obs['e']) # specific humidity [kg kg-1]\n wxt_obs['Tv'] = (1 + (0.61 * wxt_obs['q'])) * (wxt_obs['Tair'] + 273.15) # virtual temp [K]\n wxt_obs['air_density'] = (wxt_obs['press']*100.0) / (286.9 * wxt_obs['Tv'])# [kg m-3]\n\n # extend the wxt obs in height to match the dimensions of model RH\n # copy the obs so it is the same at all heights\n for var, item in wxt_obs.iteritems():\n if var not in ['time', 'rawtime']:\n # wxt_obs[var] = np.transpose(np.tile(item, (int(rh_frac.shape[1]), 1)))\n wxt_obs[var] = np.transpose(np.tile(item, (int(z.shape[-1]), 1)))\n\n return wxt_obs",
"def read_cg5(fh):\n meter, oper = None, None\n all_survey_data = ChannelList()\n\n for i, orig_line in enumerate(fh, 1):\n try:\n # Clean line\n line = orig_line.strip()\n\n # Skip blank and comment lines\n if (not line) or (line[0] == \"L\"):\n continue\n\n # Header line; look for useful information\n if line[0] == \"/\":\n vals_temp = line.split()\n if len(vals_temp) > 1:\n if vals_temp[1] == \"Instrument\":\n meter = vals_temp[-1]\n if vals_temp[1] == \"Operator:\":\n oper = vals_temp[-1]\n continue\n\n # parse string line first with respect to '/' characters (used in the date format),\n # then with ':' (used for the time display), eventually with the classic ' '\n vals_temp1 = line.split(\"/\")\n vals_temp2 = vals_temp1[0].split(\":\")\n vals_temp3 = vals_temp2[0].split()\n vals_temp4 = vals_temp2[2].split()\n\n # fill object properties:\n all_survey_data.line.append(float(vals_temp3[0]))\n s = vals_temp3[1].replace(\".0000000\", \"\")\n all_survey_data.station.append(s.strip())\n all_survey_data.elev.append(float(vals_temp3[2]))\n all_survey_data.raw_grav.append(\n float(vals_temp3[3]) * 1000.0 - float(vals_temp3[8]) * 1000.0\n ) # convert to microGal; remove tide correction\n all_survey_data.tare.append(0)\n all_survey_data.sd.append(float(vals_temp3[4]) * 1000.0)\n all_survey_data.tiltx.append(float(vals_temp3[5]))\n all_survey_data.tilty.append(float(vals_temp3[6]))\n all_survey_data.temp.append(float(vals_temp3[7]))\n all_survey_data.etc.append(float(vals_temp3[8]) * 1000.0)\n all_survey_data.meter_etc.append(float(vals_temp3[8]) * 1000.0)\n all_survey_data.dur.append(int(vals_temp3[9]))\n all_survey_data.rej.append(int(vals_temp3[10]))\n all_survey_data.t.append(\n date2num(\n dt.datetime(\n int(vals_temp4[3]),\n int(vals_temp1[1]),\n int(vals_temp1[2]),\n int(vals_temp3[11]),\n int(vals_temp2[1]),\n int(vals_temp4[0]),\n )\n )\n )\n\n all_survey_data.meter.append(meter or \"-999\")\n all_survey_data.oper.append(oper or \"-999\")\n\n all_survey_data.keepdata.append(1)\n except (IndexError, ValueError) as e:\n logging.exception(\"Error loading CG5 file at line %d\", i)\n logging.info(\"LINE: %s\", line)\n\n e.i = i\n e.line = orig_line\n raise e\n except ValueError as e:\n e.i = i\n e.line = orig_line\n raise e\n all_survey_data.meter_type = \"CG5\"\n return all_survey_data",
"def read_cg6(fh):\n meter, oper = None, None\n all_survey_data = ChannelList()\n\n for i, orig_line in enumerate(fh, 1):\n try:\n line = orig_line.strip()\n vals_temp = line.split(\"\\t\")\n if line[0] == \"/\":\n vals_temp = line.split()\n if len(vals_temp) > 1:\n if vals_temp[1] == \"Instrument\":\n meter = vals_temp[-1]\n if vals_temp[1] == \"Operator:\":\n oper = vals_temp[-1]\n continue\n # Numbers are columns in the imported file\n c_station, c_date, c_time, c_sd = 0, 1, 2, 5\n c_tiltx, c_tilty = 8, 9\n c_tide, c_tilt, c_temp = 11, 12, 13\n c_dur = 15\n c_grav, c_elev, c_lat, c_long = 3, 19, 17, 18\n\n date_temp = vals_temp[c_date].split(\"-\")\n time_temp = vals_temp[c_time].split(\":\")\n\n # fill object properties:\n all_survey_data.line.append(0.0)\n all_survey_data.station.append(vals_temp[c_station].strip())\n all_survey_data.elev.append(float(vals_temp[c_elev]))\n all_survey_data.lat.append(float(vals_temp[c_lat]))\n all_survey_data.long.append(float(vals_temp[c_long]))\n all_survey_data.raw_grav.append(\n float(vals_temp[c_grav]) * 1000.0 - float(vals_temp[c_tide]) * 1000.0\n )\n all_survey_data.tare.append(0)\n all_survey_data.etc.append(float(vals_temp[c_tide]) * 1000.0)\n all_survey_data.meter_etc.append(float(vals_temp[c_tide]) * 1000.0)\n all_survey_data.sd.append(float(vals_temp[c_sd]) * 1000.0)\n all_survey_data.meter.append(meter)\n all_survey_data.tiltx.append(float(vals_temp[c_tiltx]) * 1000.0)\n all_survey_data.tilty.append(float(vals_temp[c_tilty]) * 1000.0)\n all_survey_data.temp.append(float(vals_temp[c_temp]) * 1000.0)\n all_survey_data.dur.append(int(vals_temp[c_dur]))\n all_survey_data.rej.append(5)\n all_survey_data.t.append(\n date2num(\n dt.datetime(\n int(date_temp[0]),\n int(date_temp[1]),\n int(date_temp[2]),\n int(time_temp[0]),\n int(time_temp[1]),\n int(time_temp[2]),\n )\n )\n )\n\n all_survey_data.meter.append(meter or \"-999\")\n all_survey_data.oper.append(oper or \"-999\")\n\n all_survey_data.keepdata.append(1)\n\n except (IndexError, ValueError) as e:\n logging.exception(\"Error loading CG6 file at line %d\", i)\n logging.info(\"LINE: %s\", line)\n e.i = i\n e.line = orig_line\n raise e\n all_survey_data.meter_type = \"CG6\"\n return all_survey_data",
"def readNetCDF_Forecast(infile, outfile, monf, fyr, tgti, tgtf, tar, wlo1, elo1, sla1, nla1):\n ds=xr.open_dataset(infile,decode_times=False)\n da=list(ds.coords)\n\n for i in range(len(da)):\n if da[i]=='X' or da[i]=='lon' or da[i]=='longitude':\n ds = ds.rename({da[i]:'X'})\n if da[i]=='Y' or da[i]=='lat' or da[i]=='latitude':\n ds = ds.rename({da[i]:'Y'})\n if da[i]=='S':\n deltastyr=int(ds[da[i]][0]/12)\n nmon=ds.S.shape[0]\n nyr=int(nmon/12)\n if 'months since' in ds.S.units:\n line=ds.S.units\n stdate=str(int(line.split()[2][:4])+deltastyr)+line.split()[2][-6:]\n ds['S'] = pd.date_range(stdate, periods=ds.S.shape[0], freq='M')\n\n ds1=ds.sel(X=slice(wlo1,elo1),Y=slice(sla1,nla1),L=slice(float(tgti),float(tgtf))).mean(dim='L',skipna=True)\n ds2=ds1.mean(dim='M',skipna=True)\n Xarr=ds2.X.values\n Yarr=ds2.Y.values\n W=ds2.X.shape[0]\n H=ds2.Y.shape[0]\n a=list(ds)\n\n var1=ds2[a[0]]\n units=ds[a[0]].units\n Ti=fyr\n\n vari = a[0]\n varname = vari\n L=0.5*(float(tgtf)+float(tgti))\n\n monthdic = {'Jan':'01','Feb':'02','Mar':'03','Apr':'04','May':'05','Jun':'06','Jul':'07','Aug':'08','Sep':'09','Oct':'10','Nov':'11','Dec':'12'}\n S1=monthdic[monf]\n mi=monthdic[tar.split(\"-\")[0]]\n mf=monthdic[tar.split(\"-\")[1]]\n\n var1_stmon=var1[(var1.S.dt.month==int(monthdic[monf]))]\n var=var1_stmon.groupby(var1_stmon.S.dt.year).mean(dim=('S')).sel(year=fyr)\n var_N2S=var.reindex(Y=var.Y[::-1])\n Yarr=var_N2S.Y.values\n if tar=='Dec-Feb' or tar=='Nov-Jan': #double check years are sync\n xyear=True #flag a cross-year season\n else:\n xyear=False\n T=1\n Tarr = np.arange(Ti, Ti+T)\n\n if 'True' in np.isnan(var):\n var[np.isnan(var)]=-999. #use CPT missing value\n #Now write the CPT file\n outfile=\"usr_fcst_\"+a[0]+\"_\"+tar+\"_ini\"+monf+str(fyr)+\".tsv\"\n f = open(outfile, 'w')\n f.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n f.write(\"cpt:nfields=1\\n\")\n\n for it in range(T):\n if xyear==True:\n f.write(\"cpt:field=\"+vari+\", cpt:L=\"+str(L)+\" months, cpt:S=\"+str(Tarr[it])+\"-\"+S1+\"-01T00:00, cpt:T=\"+str(Tarr[it])+\"-\"+mi+\"/\"+str(Tarr[it]+1)+\"-\"+mf+\", cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n else:\n f.write(\"cpt:field=\"+vari+\", cpt:L=\"+str(L)+\" months, cpt:S=\"+str(Tarr[it])+\"-\"+S1+\"-01T00:00, cpt:T=\"+str(Tarr[it])+\"-\"+mi+\"/\"+mf+\", cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n np.savetxt(f, Xarr, fmt=\"%.6f\",newline='\\t')\n f.write(\"\\n\") #next line\n for iy in range(H):\n np.savetxt(f,np.r_[Yarr[iy],var_N2S[iy,0:]],fmt=\"%.6f\", newline='\\t') #excise extra line\n f.write(\"\\n\") #next line\n f.close()",
"def read_cg6tsoft(fh):\n\n meter, oper = None, None\n all_survey_data = ChannelList()\n station_name = None\n for i, orig_line in enumerate(fh, 1):\n try:\n line = orig_line.strip()\n vals_temp = line.split()\n\n if line[0] == \"/\":\n if len(vals_temp) > 1:\n if vals_temp[1] == \"Instrument\":\n meter = vals_temp[-1]\n if vals_temp[1] == \"Operator:\":\n oper = vals_temp[-1]\n if vals_temp[1] == \"Station:\":\n station_name = vals_temp[-1]\n continue\n\n # Numbers are columns in the imported file\n c_tiltx, c_tilty = 12, 13\n c_tide, c_tilt, c_temp = 19, 18, 14\n c_lat, c_long, c_elev = 7, 8, 9\n c_grav = 11 # CorrGravity channel\n\n # fill object properties:\n if station_name:\n all_survey_data.station.append(station_name)\n all_survey_data.elev.append(float(vals_temp[c_elev]))\n all_survey_data.lat.append(float(vals_temp[c_lat]))\n all_survey_data.long.append(float(vals_temp[c_long]))\n all_survey_data.raw_grav.append(float(vals_temp[c_grav]) * 1000.0)\n all_survey_data.tare.append(0)\n all_survey_data.etc.append(float(vals_temp[c_tide]) * 1000.0)\n all_survey_data.meter_etc.append(float(vals_temp[c_tide]) * 1000.0)\n all_survey_data.sd.append(\n -999\n ) # SD not exported in Tsoft format?? It is in regular format\n all_survey_data.meter.append(meter)\n all_survey_data.tiltx.append(float(vals_temp[c_tiltx]) * 1000.0)\n all_survey_data.tilty.append(float(vals_temp[c_tilty]) * 1000.0)\n all_survey_data.temp.append(float(vals_temp[c_temp]) * 1000.0)\n # cols 0, 1, 2, 3, 4, 5 = year, month, day, hour, minute, second\n temp_date_ints = (int(i) for i in vals_temp[:6])\n all_survey_data.t.append(date2num(dt.datetime(*temp_date_ints)))\n\n all_survey_data.meter.append(meter or \"-999\")\n all_survey_data.oper.append(oper or \"-999\")\n\n all_survey_data.keepdata.append(1)\n all_survey_data.dur.append(-999)\n all_survey_data.rej.append(-999)\n\n except (IndexError, ValueError) as e:\n logging.exception(\"Error loading CG6TSoft file %s, at line %d\", fname, i)\n logging.info(\"LINE: %s\", line)\n e.i = i\n e.line = orig_line\n raise e\n\n if all_survey_data.raw_grav:\n all_survey_data.meter_type = \"CG6Tsoft\"\n return all_survey_data\n\n else:\n raise ValueError",
"def read_data(fdh):\r\n print(\"READING FILTERED DATA\")\r\n arcpy.env.workspace = scratch\r\n fiber = []\r\n scs = []\r\n conduit = []\r\n vaults = []\r\n trench = []\r\n\r\n fcs = [\"final_fiber\", \"final_scs\", \"final_con\", \"final_vaults\", \"trench\"]\r\n cur_fields = [fiber_fields, sc_fields, con_fields, vault_fields, trench_fields]\r\n out_data = [fiber, scs, conduit, vaults, trench]\r\n # out_data = []\r\n i = 0\r\n domains = arcpy.da.ListDomains(scratch)\r\n for fc, req_fields, out_fc in zip(fcs, cur_fields, out_data):\r\n # print \"Read FINAL FC: {0}\".format(fc)\r\n out_data.append([])\r\n allfields = arcpy.ListFields(fc)\r\n field_map = {o.name.lower(): o for o in allfields}\r\n fields = [field_map[x.lower()] for x in req_fields]\r\n with arcpy.da.SearchCursor(scratch + \"\\\\\" + fc, [x.name for x in fields],\r\n \"fdhid like '{0}'\".format(fdh)) as cursor:\r\n for row in cursor:\r\n print(row)\r\n temp = [\"\" if x is None else (x if fields[ind].domain == \"\" else\r\n [y for y in domains if y.name == fields[ind].domain][0].codedValues[x])\r\n for ind, x in enumerate(row)]\r\n for i, z in enumerate(temp):\r\n try:\r\n temp[i] = float(z)\r\n except:\r\n temp[i] = z\r\n out_fc.append(temp)\r\n #print(\"Read FINAL FC: {0} len: {1}\".format(fc, len(out_fc)))\r\n\r\n return (fiber, scs, conduit, vaults, trench)",
"def f_read_htk(filename, data_format='f4', end='l'):\n if end=='l':\n data_format = '<'+data_format\n data_formatInt4 = '<i4'\n data_formatInt2 = '<i2'\n elif end=='b':\n data_format = '>'+data_format\n data_formatInt4 = '>i4'\n data_formatInt2 = '>i2'\n else:\n data_format = '='+data_format\n data_formatInt4 = '=i4'\n data_formatInt2 = '=i2'\n\n head_type = np.dtype([('nSample',data_formatInt4), \n ('Period',data_formatInt4),\n ('SampleSize',data_formatInt2), \n ('kind',data_formatInt2)])\n f = open(filename,'rb')\n head_info = np.fromfile(f,dtype=head_type,count=1)\n \n \"\"\"if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n \"\"\" \n if 'f' in data_format:\n sample_size = int(head_info['SampleSize'][0]/4)\n else:\n print(\"Error in read_htk: input should be float32\")\n return False\n \n datatype = np.dtype((data_format,(sample_size,)))\n data = np.fromfile(f,dtype=datatype)\n f.close()\n return data",
"def readRMRCS(filename, headonly=False, **kwargs):\n starttime = kwargs.get('starttime')\n endtime = kwargs.get('endtime')\n getfile = True\n\n debug = kwargs.get('debug')\n debug = True\n if debug:\n print (\"RCS: found data from Richards Perl script\")\n\n #fh = open(filename, 'r', encoding='utf-8', newline='', errors='ignore')\n #fh = open(filename, 'r', newline='')\n fh = open(filename, 'rb')\n # read file and split text into channels\n # --------------------------------------\n stream = DataStream()\n headers = {}\n array = [[] for key in KEYLIST]\n data = []\n measurement = []\n unit = []\n i = 0\n key = None\n\n # try to get day from filename (platform independent)\n # --------------------------------------\n theday = extractDateFromString(filename)\n try:\n if starttime:\n if not theday[-1] >= datetime.date(stream._testtime(starttime)):\n getfile = False\n if endtime:\n if not theday[0] <= datetime.date(stream._testtime(endtime)):\n getfile = False\n except:\n # Date format not recognized. Need to read all files\n getfile = True\n\n if getfile:\n for line in fh:\n line = line.decode('utf-8','ignore')\n if line.isspace():\n # blank line\n pass\n elif line.startswith('# RCS Fieldpoint'):\n # data header\n fieldpoint = line.replace('# RCS Fieldpoint','').strip()\n elif line.startswith('#'):\n # data header\n colsstr = line.split(',')\n if (len(colsstr) == 3):\n # select the lines with three komma separeted parts -> they describe the data\n meastype = colsstr[1].split()\n unittype = colsstr[2].split()\n measurement.append(meastype[2])\n unit.append(unittype[2])\n headers['col-'+KEYLIST[i+1]] = measurement[i]\n headers['unit-col-'+KEYLIST[i+1]] = unit[i]\n if headers['unit-col-'+KEYLIST[i+1]] == '--':\n headers['unit-col-'+KEYLIST[i+1]] = ''\n i=i+1\n elif headonly:\n # skip data for option headonly\n continue\n else:\n # data entry - may be written in multiple columns\n # row beinhaltet die Werte eine Zeile\n elem = line[:-1].split()\n gottime = False\n\n try:\n array[0].append(date2num(datetime.strptime(elem[1],\"%Y-%m-%dT%H:%M:%S\")))\n add = 2\n gottime = True\n except:\n try:\n array[0].append(date2num(datetime.strptime(elem[1]+'T'+elem[2],\"%Y%m%dT%H%M%S\")))\n add = 3\n gottime = True\n except:\n raise ValueError(\"Can't read date format in RCS file\")\n if gottime:\n for i in range(len(unit)):\n try:\n array[i+1].append(float(elem[i+add]))\n except:\n array[i+1].append(float(nan))\n pass\n\n array = [np.asarray(el) for el in array]\n headers['SensorID'] = 'RCS{}_20160114_0001'.format(fieldpoint) # 20160114 corresponds to the date at which RCS was activated\n headers[\"SensorName\"] = 'RCS{}'.format(fieldpoint)\n headers[\"SensorSerialNum\"] = \"20160114\"\n headers[\"SensorRevision\"] = \"0001\"\n headers[\"SensorModule\"] = \"RCS\"\n headers[\"SensorGroup\"] = \"environment\"\n headers[\"SensorDataLogger\"] = \"{}\".format(fieldpoint)\n else:\n headers = stream.header\n stream =[]\n\n fh.close()\n\n\n return DataStream([LineStruct()], headers, np.asarray(array,dtype=object))",
"def read_netcdf(netcdf):\n print('Read GLDAS netCDF file')\n\n f = netCDF4.Dataset(netcdf, 'r')\n\n data = {}\n\n data['data'] = []\n data['filename'] = netcdf\n data['file'] = f\n data['longitude_size'] = len(f.dimensions['lon'])\n data['latitude_size'] = len(f.dimensions['lat'])\n data['time_size'] = len(f.dimensions['time'])\n data['longitude_array'] = f.variables['lon']\n data['latitude_array'] = f.variables['lat']\n data['time_array'] = f.variables['time']\n data['time_units'] = f.variables['time'].units\n data['time_step'] = get_time_step(data['time_array'])\n data['fill_value'] = get_fill_value(f)\n data['longitude_step'] = abs(data['longitude_array'][1] \\\n - data['longitude_array'][0])\n data['latitude_step'] = abs(data['latitude_array'][1] \\\n - data['latitude_array'][0])\n\n print(' - Number of longitudes : ' + str(data['longitude_size']))\n print(' - Number of latitudes : ' + str(data['latitude_size']))\n print(' - Number of time steps : ' + str(data['time_size']))\n print(' - Interval size for longitudes : ' + str(data['longitude_step']))\n print(' - Interval size for latitudes : ' + str(data['latitude_step']))\n print(' - Interval size for time : ' + str(data['time_step']))\n print(' - Fill value : ' + str(data['fill_value']))\n\n return data",
"def readMETEO(filename, headonly=False, **kwargs):\n\n starttime = kwargs.get('starttime')\n endtime = kwargs.get('endtime')\n takehelium = kwargs.get('takehelium')\n debug = kwargs.get('debug')\n getfile = True\n\n heliumcols = []\n\n stream = DataStream()\n\n if debug:\n print (\"METEO: found RCS meteo data\")\n\n # Check whether header infromation is already present\n headers = {}\n\n theday = extractDateFromString(filename)\n\n try:\n if starttime:\n if not theday[-1] >= datetime.date(stream._testtime(starttime)):\n getfile = False\n if endtime:\n if not theday[0] <= datetime.date(stream._testtime(endtime)):\n getfile = False\n except:\n print(\"Did not recognize the date format\")\n # Date format not recognized. Need to read all files\n getfile = True\n\n fh = open(filename, 'rb')\n\n array = [[] for key in KEYLIST]\n fkeys = []\n felements = []\n\n if getfile:\n for line in fh:\n line = line.decode('utf-8',errors='ignore')\n if line.isspace():\n # blank line\n continue\n elif line.startswith(' '):\n continue\n elif line.startswith('Date'):\n # Read the header information\n #1) first get number of columns\n cols = line.split()\n if not takehelium:\n try:\n columns = [elem for elem in cols if not elem.startswith('He')]\n except:\n print(\"Found error in header\", filename)\n columns = []\n else:\n columns = cols\n for i, elem in enumerate(columns):\n if i > 1:\n key = KEYLIST[i-1]\n fkeys.append(key)\n headers['col-'+key] = elem.replace('_','')\n headers['unit-col-'+key] = '-'\n\n else:\n colsstr = line.split()\n if not takehelium:\n try:\n colsstr = [elem for i, elem in enumerate(colsstr) if not cols[i].startswith('He')]\n except:\n print(\"Found error in data sequence\", filename)\n #print colsstr\n break\n row = LineStruct()\n try:\n date = colsstr[0]+'-'+colsstr[1]\n array[0].append(date2num(datetime.strptime(date,\"%Y%m%d-%H%M%S\")))\n #row.time = date2num(datetime.strptime(date,\"%Y%m%d-%H%M%S\"))\n for i in range(2,len(colsstr)):\n key = KEYLIST[i-1]\n if not key.startswith('str') and not key in ['flag','comment','typ']:\n array[i-1].append(float(colsstr[i]))\n #exec('row.'+key+' = float(colsstr[i])')\n elif not key in ['flag','comment','typ']:\n array[i-1].append(str(float(colsstr[i])))\n #exec('row.'+key+' = str(float(colsstr[i]))')\n #row.typ = 'other'\n #stream.add(row)\n except:\n pass\n\n for idx,el in enumerate(array):\n array[idx] = np.asarray(el)\n\n headers['SensorDescription'] = 'RCS: filtered Meteorlogical data - Andreas Winkelbauer'\n headers['SensorName'] = 'Various Meteorology sensors'\n headers['SensorID'] = 'METEO_RCS2015_0001'\n headers['SensorType'] = 'Various'\n headers['SensorModule'] = 'RCS'\n headers['SensorDataLogger'] = 'F77'\n headers['SensorGroup'] = 'environment'\n headers['DataFormat'] = 'RCSMETEO v3.0'\n headers['col-t2'] = '430UEV' # Necessary because of none UTF8 coding in header\n headers['col-f'] = 'T'\n headers['unit-col-f'] = 'deg C'\n headers['col-z'] = 'Schneehoehe'\n headers['unit-col-z'] = 'cm'\n if not takehelium:\n headers['col-t1'] = 'rh'\n headers['unit-col-t1'] = 'percent'\n headers['col-var5'] = 'P'\n headers['unit-col-var5'] = 'hPa'\n headers['col-var1'] = 'Wind'\n headers['unit-col-var1'] = 'm/s'\n\n headers['SensorKeys'] = ','.join(fkeys)\n headers['SensorElements'] = ','.join([headers['col-'+key] for key in KEYLIST if key in fkeys])\n\n if debug:\n print (\"METEO: Successfully loaded METEO data\")\n return DataStream([LineStruct()], headers, np.asarray(array,dtype=object))",
"def load_data_from_nc():\n \n file_data = Dataset(\"air.mon.mean.nc\", \"r\")\n latitudes = file_data.variables[\"lat\"][:] \n longitudes = file_data.variables[\"lon\"][:]\n times = file_data.variables[\"time\"][:] \n air_temperatures = file_data.variables[\"air\"][:] \n file_data.close()\n \n return latitudes, longitudes, times, air_temperatures",
"def load_file(filename):\r\n hdu = fits.open(filename)\r\n time = hdu[1].data[\"TIME\"]\r\n flux = hdu[1].data[\"PDCSAP_FLUX\"]\r\n flux[flux == 0] = numpy.nan\r\n return time, flux",
"def readData(self, n):\n f = open(self.file, \"rb\")\n fortran.skip(f)\n for i in range(n):\n fortran.skip(f) # Detector Header\n if self.detector[i].low_en_neutr_sc:\n fortran.skip(f) # Detector low energy neutron groups\n fortran.skip(f) # Detector data\n\n fortran.skip(f) # Detector Header\n if self.detector[n].low_en_neutr_sc:\n fortran.skip(f) # Detector low energy neutron groups\n data = fortran.read(f) # Detector data\n f.close()\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read in the Q_ext for dry murk. EW 21/02/17
|
def read_Q_dry_ext(ceil_lam):
    miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'
    filename = 'calculated_Q_ext_' + str(ceil_lam) + 'nm.csv'
    raw = np.loadtxt(miedir + filename, delimiter=',')
    Q_ext_dry = {'radius': raw[:, 0],
                 'Q_ext': raw[:, 1]}
    return Q_ext_dry
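
# A minimal usage sketch (assumptions: numpy is imported as np, and the pre-computed CSV exists
# in the hard-coded Mie directory above; 905 is an illustrative ceilometer wavelength in nm):
#
#     Q_ext_dry = read_Q_dry_ext(905)
#     Q_ext_dry['radius']   # dry radii from the Mie look-up table
#     Q_ext_dry['Q_ext']    # corresponding dry extinction efficiencies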
|
[
"def wq_from_file(self, water_quality_raw_data):",
"def get_qcodes(qrunes_file):\n startStr = '@qcodes:'\n endStr = '@script:'\n newLi = []\n info = fetch(startStr,endStr,qrunes_file,newLi)\n if not info :\n print('Please check the qcodes .')\n pass\n qcodes_content = ''.join(info)\n return qcodes_content",
"def read_from_qe_dos_txt(self):\n raise Exception(\"No function defined to read this quantity \"\n \"from a qe.dos.txt file\")",
"def readIqw(self, FileName, iqiq = True): # Verified 2020.0115\n\n print(\"*.IQW file does not have sampling rate. Please add to output\")\n BytesPerValue = 4\n try:\n file = open(FileName, \"rb\")\n data = file.read()\n file.close()\n except:\n print(\"File open error (\"+ FileName+\")!\")\n\n ReadSamples = len(data) // BytesPerValue\n data = list(struct.unpack(\"f\"*ReadSamples, data))\n if iqiq:\n self.__iqiq2complex__(data)\n else:\n self.__iiqq2complex__(data)",
"def ioneqRead(ioneqName='', minIoneq=1.e-20, verbose=False):\n dir = os.environ[\"XUVTOP\"]\n ioneqdir = os.path.join(dir,'ioneq')\n ioneqNames = util.listRootNames(ioneqdir)\n if ioneqName not in ioneqNames:\n # the user will select an ioneq file\n choice = chgui.gui.chpicker(ioneqdir, label='Select a single ioneq file')\n if choice.rootName in ioneqNames:\n fname = choice.fileName\n ioneqName = choice.rootName\n# fname1 = choice.baseName\n# fname1 = chgui.gui.chpicker(ioneqdir,filter='*.ioneq',label = 'Select an Ionization Equilibrium file')\n# fname = os.path.join(ioneqdir, fname1)\n if fname == None:\n print(' no ioneq file selected')\n return False\n else:\n ioneqfilename = os.path.basename(fname)\n ioneqname,ext = os.path.splitext(ioneqfilename)\n else:\n filelist = util.listFiles(ioneqdir)\n idx = ioneqNames.index(ioneqName)\n fname = filelist[idx]\n# fname = os.path.join(dir,'ioneq',ioneqname+'.ioneq')\n# newlist = fnmatch.filter(filelist, '*.ioneq')\n# baselist = []\n# for one in newlist:\n# baselist.append(os.path.basename(one))\n# cnt = baselist.count(ioneqname+'.ioneq')\n# if cnt == 0:\n# print((' ioneq file not found: ', fname))\n# print(' the following files do exist: ')\n# for one in newlist:\n# print((os.path.basename(one)))\n# return\n# elif cnt == 1:\n# idx = baselist.index(ioneqname+'.ioneq')\n# if verbose:\n# print((' file exists: ', newlist[idx]))\n# fname = newlist[idx]\n# elif cnt > 1:\n# print((' found more than one ioneq file', fname))\n# return\n #\n input = open(fname,'r')\n s1 = input.readlines()\n input.close()\n ntemp,nele = s1[0].split()\n if verbose:\n print((' ntemp, nele = %5i %5i'%(ntemp, nele)))\n nTemperature = int(ntemp)\n nElement = int(nele)\n #\n header_linet = FortranRecordReader(str(nTemperature)+'f6.2')\n ioneqTemperature = header_linet.read(s1[1])\n ioneqTemperature = np.asarray(ioneqTemperature[:],'Float64')\n ioneqTemperature = 10.**ioneqTemperature\n nlines = 0\n idx = -1\n while idx < 0:\n aline = s1[nlines][0:5]\n idx = aline.find('-1')\n nlines += 1\n nlines -= 1\n #\n #\n# ioneqformat=FortranFormat('2i3,'+str(nTemperature)+'e10.2')\n header_lineq = FortranRecordReader('2i3,'+str(nTemperature)+'e10.2')\n #\n ioneqAll = np.zeros((nElement,nElement+1,nTemperature),'Float64')\n for iline in range(2,nlines):\n# out=FortranLine(s1[iline],ioneqformat)\n out = header_lineq.read(s1[iline])\n iz = out[0]\n ion = out[1]\n ioneqAll[iz-1,ion-1].put(list(range(nTemperature)),np.asarray(out[2:],'Float64'))\n ioneqAll = np.where(ioneqAll > minIoneq, ioneqAll, 0.)\n ioneqRef = []\n for one in s1[nlines+1:]:\n ioneqRef.append(one[:-1]) # gets rid of the \\n\n del s1\n return {'ioneqname':ioneqName,'ioneqAll':ioneqAll,'ioneqTemperature':ioneqTemperature,'ioneqRef':ioneqRef}",
"def read(filename='Q'):\n f=open(filename,'r').read()\n i = 0\n filetype,= unpack('<64s',f[i:i+64*strSize]) ; i += 64*strSize\n version, = unpack('<i',f[i:i+intSize]) ; i += intSize\n comment, = unpack('<1024s',f[i:i+1024*strSize]) ; i += 1024*strSize\n D,N_q = unpack('<2i',f[i:i+2*intSize]) ; i += 2*intSize\n Q = unpack('<%id' % (N_q*D),f[i:])\n Q = numpy.array(Q)\n Q.shape = (N_q,D)\n return (filetype.strip('\\x00'),version,comment.strip('\\x00')),Q",
"def test_qual_out(self):\n records = SeqIO.parse(open(\"Quality/example.fastq\"),\"fastq\")\n h = StringIO(\"\")\n SeqIO.write(records, h, \"qual\")\n self.assertEqual(h.getvalue(),open(\"Quality/example.qual\").read())",
"def import_admix_Q(self,file_path):\n with open(file_path, 'r') as f:\n all_lines = f.readlines()\n\n if len(all_lines) == len(self.subject_list): # Check that the lines in the Q file match the lines in the fam\n i = 0\n\n for line in all_lines:\n # Following line of code splits the line into a list of ratios,casts the list to a list of floats then normalizes the values\n ratios = self.normalize_ratios([float(j) for j in line.split()])\n self.subject_list[i].values = ratios\n i += 1\n\n return self.subject_list\n\n else:\n return 'fam file and Q file do not match'",
"def test_example_qual(self) :\n write_read(os.path.join(\"Quality\", \"example.qual\"), \"qual\", \"fasta\")\n write_read(os.path.join(\"Quality\", \"example.qual\"), \"qual\", \"qual\")\n write_read(os.path.join(\"Quality\", \"example.qual\"), \"qual\", \"fastq\")\n write_read(os.path.join(\"Quality\", \"example.qual\"), \"qual\", \"fastq-sanger\")\n write_read(os.path.join(\"Quality\", \"example.qual\"), \"qual\", \"fastq-solexa\")\n write_read(os.path.join(\"Quality\", \"example.qual\"), \"qual\", \"fastq-illumina\")",
"def read_cg5(fh):\n meter, oper = None, None\n all_survey_data = ChannelList()\n\n for i, orig_line in enumerate(fh, 1):\n try:\n # Clean line\n line = orig_line.strip()\n\n # Skip blank and comment lines\n if (not line) or (line[0] == \"L\"):\n continue\n\n # Header line; look for useful information\n if line[0] == \"/\":\n vals_temp = line.split()\n if len(vals_temp) > 1:\n if vals_temp[1] == \"Instrument\":\n meter = vals_temp[-1]\n if vals_temp[1] == \"Operator:\":\n oper = vals_temp[-1]\n continue\n\n # parse string line first with respect to '/' characters (used in the date format),\n # then with ':' (used for the time display), eventually with the classic ' '\n vals_temp1 = line.split(\"/\")\n vals_temp2 = vals_temp1[0].split(\":\")\n vals_temp3 = vals_temp2[0].split()\n vals_temp4 = vals_temp2[2].split()\n\n # fill object properties:\n all_survey_data.line.append(float(vals_temp3[0]))\n s = vals_temp3[1].replace(\".0000000\", \"\")\n all_survey_data.station.append(s.strip())\n all_survey_data.elev.append(float(vals_temp3[2]))\n all_survey_data.raw_grav.append(\n float(vals_temp3[3]) * 1000.0 - float(vals_temp3[8]) * 1000.0\n ) # convert to microGal; remove tide correction\n all_survey_data.tare.append(0)\n all_survey_data.sd.append(float(vals_temp3[4]) * 1000.0)\n all_survey_data.tiltx.append(float(vals_temp3[5]))\n all_survey_data.tilty.append(float(vals_temp3[6]))\n all_survey_data.temp.append(float(vals_temp3[7]))\n all_survey_data.etc.append(float(vals_temp3[8]) * 1000.0)\n all_survey_data.meter_etc.append(float(vals_temp3[8]) * 1000.0)\n all_survey_data.dur.append(int(vals_temp3[9]))\n all_survey_data.rej.append(int(vals_temp3[10]))\n all_survey_data.t.append(\n date2num(\n dt.datetime(\n int(vals_temp4[3]),\n int(vals_temp1[1]),\n int(vals_temp1[2]),\n int(vals_temp3[11]),\n int(vals_temp2[1]),\n int(vals_temp4[0]),\n )\n )\n )\n\n all_survey_data.meter.append(meter or \"-999\")\n all_survey_data.oper.append(oper or \"-999\")\n\n all_survey_data.keepdata.append(1)\n except (IndexError, ValueError) as e:\n logging.exception(\"Error loading CG5 file at line %d\", i)\n logging.info(\"LINE: %s\", line)\n\n e.i = i\n e.line = orig_line\n raise e\n except ValueError as e:\n e.i = i\n e.line = orig_line\n raise e\n all_survey_data.meter_type = \"CG5\"\n return all_survey_data",
"def read_qa_brick(filename):\n from desispec.qa.qa_brick import QA_Brick\n # Read\n qa_data = read_qa_data(filename)\n\n # Instantiate\n qabrick = QA_Brick(in_data=qa_data)\n\n return qabrick",
"def get_import_file_name(qrunes_file):\n startStr = '@qcodes:'\n endStr = '@script:'\n newLi = []\n info = fetch(startStr,endStr,qrunes_file,newLi)\n import_arr = []\n for i in info:\n n = re.findall(r\".qrunes\", i)\n if n:\n import_file_name = i.split()[1].strip('\\'\"')\n # qrunes_file = os.path.dirname(qrunes_file)+\"\\\\\"+import_file_name)\n import_arr.append(import_file_name)\n return import_arr",
"def test_qual(self):\n records1 = list(SeqIO.parse(open(\"Quality/example.qual\"),\"qual\"))\n records2 = list(SeqIO.parse(open(\"Quality/example.fastq\"),\"fastq\"))\n #Will ignore the unknown sequences :)\n self.assert_(compare_records(records1, records2))",
"def calc_Q_ext_wet(ceil_lam, r_d, r_g, rh_frac, mod_time):\n import sys\n if sys.platform == 'linux2':\n sys.path.append('/net/home/mm0100/ewarren/Documents/AerosolBackMod/scripts/ellUtils') # general utils\n from ellUtils import nearest, netCDF_read, binary_search_nearest\n else:\n from ellUtils.ellUtils import nearest, netCDF_read, binary_search_nearest\n\n\n # Reading functions\n def read_f_RH(mod_time, ceil_lam):\n\n \"\"\"\n Read in the f_RH data from netCDF file\n EW 21/02/17\n\n :param mod_time (array of datetimes) datetimes for the timesteps\n :param ceil_lam: (int) ceilometer wavelength [nm]\n :return: data = {RH:... f_RH:...}\n\n \"\"\"\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])\n return f_RH\n\n def read_Q_ext_dry(mod_time, ceil_lam):\n\n \"\"\"\n Read in the Q_ext for dry murk.\n EW 21/02/17\n\n :param mod_time (array of datetimes) datetimes for the timesteps\n :param ceil_lam: (int) ceilometer wavelength [nm]\n :return: Q_ext_dry = {radius:... Q_ext_dry:...}\n\n \"\"\"\n\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'urban_monthly_Q_ext_dry_' + str(ceil_lam) + 'nm.csv'\n\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n # format data into a dictionary\n Q_ext_dry = {'radius_m': raw[:, 0],\n 'Q_ext_dry': raw[:, 1:]} # Q_ext_dry['Q_ext_dry'].shape(radii, month)\n\n\n return Q_ext_dry\n\n # ---------------------------\n\n # cronvert geometric radius to nm to find f(RH)\n \n r_g_nm = r_g * 1.0e9\n\n # height idx range of r_d and RH\n height_idx_range = r_d.shape[1]\n\n # read in Q_ext_dry and f(RH) look up tables\n f_RH = read_f_RH(mod_time, ceil_lam) # ['f_RH MURK'].shape(month, radii, RH)\n Q_ext_dry = read_Q_ext_dry(mod_time, ceil_lam) #.shape(radii, month)\n\n # create matric of Q_ext_dry based on r_d\n Q_ext_dry_matrix = np.empty(r_d.shape)\n Q_ext_dry_matrix[:] = np.nan\n f_RH_matrix = np.empty(r_d.shape)\n f_RH_matrix[:] = np.nan\n\n # # find Q_ext dry, given the dry radius matrix\n # # find f(RH), given the RH fraction matric\n\n # loop through all elements of the array\n # idx is the full position of the element e.g. idx = (24L, 69L, 11L, 21L) - (time, height, lat, lon)\n for idx, _ in np.ndenumerate(r_d):\n\n # month idx for Q_ext_dry\n month_idx = mod_time[idx[0]].month - 1\n\n # debugging\n # if (idx[1] == 0) & (idx[2] == 0) & (idx[3] == 0):\n # print idx\n\n # Q_ext_dry - binary\n # LUT uses r_d (volume) [meters]\n r_Q_idx = binary_search_nearest(Q_ext_dry['radius_m'], r_d[idx])\n Q_ext_dry_matrix[idx] = Q_ext_dry['Q_ext_dry'][r_Q_idx, month_idx]\n\n\n # f(RH) (has it's own r_idx that is in units [nm])\n # LUT uses r_g (geometric) [nm] ToDo should change this to meters...\n r_f_RH_idx = binary_search_nearest(f_RH['radii_range'], r_g_nm[idx])\n rh_idx = binary_search_nearest(f_RH['RH'], rh_frac[idx])\n f_RH_matrix[idx] = f_RH['f(RH) MURK'][month_idx, r_f_RH_idx, rh_idx]\n\n # calculate Q_ext_wet\n Q_ext = Q_ext_dry_matrix * f_RH_matrix\n\n return Q_ext, Q_ext_dry_matrix, f_RH_matrix",
"def calc_Q_ext_wet(ceil_lam, r_md, RH):\n\n from ellUtils import nearest\n\n def read_f_RH(ceil_lam):\n \"\"\"\n Read in the f_RH data from csv\n EW 21/02/17\n\n :param filename:\n :return: data = {RH:... f_RH:...}\n\n filename must be in the form of 'calculated_ext_f(RH)_[ceil_lambda]nm.csv'\n \"\"\"\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH\n\n def read_Q_dry_ext(ceil_lam):\n \"\"\"\n Read in the Q_ext for dry murk.\n EW 21/02/17\n\n :param filename:\n :param lam:\n :return: Q_ext_dry = {radius:... Q_ext_dry:...}\n\n Requres the wavelength to be passed, just so in the future, the 910 nm file is not incorrectly used by mistake when\n it should use the file for another wavelength.\n \"\"\"\n\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n filename = 'calculated_Q_ext_' + str(ceil_lam) + 'nm.csv'\n\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n Q_ext_dry = {'radius': raw[:, 0],\n 'Q_ext': raw[:, 1]}\n\n return Q_ext_dry\n\n RH_factor = 0.01 # Relative Humidity in 0.38 not 38%\n\n # calculate Q_ext_wet\n f_RH = read_f_RH(ceil_lam)\n Q_ext_dry = read_Q_dry_ext(ceil_lam)\n\n # create matric of Q_ext_dry based on r_md\n Q_ext_dry_matrix = np.empty(r_md.shape)\n f_RH_matrix = np.empty(RH.shape)\n\n # find Q_ext dry, given the dry radius matrix\n if r_md.size != 1:\n for i in range(r_md.shape[0]):\n idx = nearest(Q_ext_dry['radius'], r_md[i])[1]\n Q_ext_dry_matrix[i] = Q_ext_dry['Q_ext'][idx]\n\n else:\n idx = nearest(Q_ext_dry['radius'], r_md)[1]\n Q_ext_dry_matrix = Q_ext_dry['Q_ext'][idx]\n\n # find f(RH), given the RH matrix\n # need RH factor as f_RH['RH'] in units of frac not percentage\n if RH.size != 1:\n for i in range(RH.shape[0]):\n idx = nearest(f_RH['RH'], RH_factor * RH[i])[1]\n f_RH_matrix[i] = f_RH['f_RH'][idx]\n else:\n idx = nearest(f_RH['RH'], RH_factor * RH)[1]\n f_RH_matrix = f_RH['f_RH'][idx]\n\n # calculate Q_ext_wet\n Q = Q_ext_dry_matrix * f_RH_matrix\n # print np.mean(Q_ext_dry_matrix[:,:20])\n\n return Q, Q_ext_dry_matrix, f_RH_matrix",
"def _read_analogies(self):\n questions = []\n questions_skipped = 0\n with open(self._options.eval_data, \"r\", encoding='utf-8') as analogy_f:\n for line in analogy_f:\n # if line.startswith(b\":\"): # Skip comments.\n # continue\n # words = line.decode('utf-8').strip().lower().split(b\" \")\n words = line.strip().lower().split(\" \")\n ids = [self._word2id.get(w.strip()) for w in words]\n if None in ids or len(ids) != 4:\n print (ids)\n questions_skipped += 1\n else:\n questions.append(np.array(ids))\n print(\"Eval analogy file: \", self._options.eval_data)\n print(\"Questions: \", len(questions))\n print(\"Skipped: \", questions_skipped)\n self._analogy_questions = np.array(questions, dtype=np.int32)",
"def init_fastq_record(fqr,n=9):\n return fqr[1][:n]",
"def get_qevr(self):\r\n qevr = str(self.inst.query(\"STAT:QUES?\"))\r\n return(qevr)",
"def Q2_query(QDrug, QDisease, options):\n\n # Pre-process query (get medic ids)\n drug = QDrug.strip().lower()\n drug_id = GNBR_api.get_MEDICID(drug)\n\n\n # Generate prefix for output file, is drug_disease\n if QDisease is not None:\n disease = QDisease.strip().lower()\n disease_id = GNBR_api.get_MEDICID(disease)\n out_name = drug + \"_\" + disease.replace(\" \", \"-\").lower()\n\n\n # Get list of genes causally annotated to a disease\n if options.verbose and options.batchFile is None:\n print(\"Querying GNBR for disease genes...\")\n dis_gene_list = GNBR_api.get_disease_gene_list(disease, freq_correct=True)\n\n # If first query did not work, try getting nearest matches for the query and try again\n if dis_gene_list is None:\n\n # Get matches\n disease2, match_type = GNBR_api.query_term_for_matches(disease)\n\n # If not fuzzy matching, but simple order matching, use the new disease query and proceed\n if match_type == 0:\n dis_gene_list = GNBR_api.get_disease_gene_list(disease2, freq_correct=True)\n # \n elif len(options.gene_list) >0:\n out_name = drug + \"__\"\n disease = None\n disease_id = None\n dis_gene_list = [GNBR_api.resolve_gene_to_EntrezGeneID(gene) for gene in options.gene_list]\n\n\n\n\n\n # Get list of drug targets from Pharos\n if options.verbose and options.batchFile is None:\n print(\"Querying Pharos for drug targets...\")\n drug_genes = pharos_api.get_ligand_targets(drug)\n\n # If Pharos did not return targets, pull them from the literature\n if drug_genes is None:\n if options.verbose and options.batchFile is None:\n print(\"Pharos did not contain drug target information, querying GNBR for drug targets...\")\n # Search GNBR for a drug target, via binding annotations\n drug_gene_list = GNBR_api.query_drug_target(drug)\n\n else:\n # If targets are from Pharos, map them to their Uniprot IDs\n drug_gene_list = []\n print(\"If targets are from Pharos, map them to their Uniprot IDs\")\n for gene in drug_genes:\n drug_gene_list.append(GNBR_api.resolve_gene_to_EntrezGeneID(gene))\n\n # If either disease or drug list comes up empty\n # the query has failed and we return a statement to that effect\n\n if dis_gene_list is None and drug_gene_list is None:\n print(\"ERROR: Drug and disease not recognized\")\n return \"ERROR: Drug and disease not recognized\"\n\n elif dis_gene_list is None:\n print(\"ERROR: Disease not recognized\")\n return \"ERROR: Disease not recognized\"\n\n elif drug_gene_list is None:\n print(\"ERROR: No drug targets found\")\n return \"ERROR: No drug targets found\"\n\n # If we have targets\n else:\n print(\"drug (%s) and disease (%s) found. 
Processing...\" % (drug, disease) )\n # Generate output directory\n overall_path = os.path.abspath(os.path.dirname(__file__))\n results_dir = os.path.join(*[overall_path, options.outPath, out_name])\n\n # filter out None\n dis_gene_list = [gene for gene in dis_gene_list if gene is not None]\n drug_gene_list = [gene for gene in drug_gene_list if gene is not None]\n\n if len(dis_gene_list) < 1:\n print(\"ERROR: No disease targets found\")\n return \"ERROR: No disease targets found\"\n\n if len(drug_gene_list) < 1:\n print(\"ERROR: No drug targets found\")\n return \"ERROR: No drug targets found\"\n\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n \n # Select the top 25 genes from the disease and then drug gene list for GO enrichment\n print(\"Getting disease genes Calling resolve_EntrezGeneID_to_NCBIGeneName\")\n dis_genes = pd.DataFrame([[GNBR_api.resolve_EntrezGeneID_to_NCBIGeneName(str(x)),x] for x in dis_gene_list], columns=[\"Gene\", \"Entrez ID\"])\n dis_genes_short = dis_genes[:min(len(dis_genes), 5)]\n dis_gene_list = list(map(int, dis_gene_list))\n\n \n print(\"Getting drug genes Calling resolve_EntrezGeneID_to_NCBIGeneName\")\n drug_genes = [[GNBR_api.resolve_EntrezGeneID_to_NCBIGeneName(x), x] for x in drug_gene_list]\n drug_genes = pd.DataFrame(drug_genes, columns=[\"Gene\", \"Entrez ID\"])\n drug_genes_short = drug_genes[:min(5, len(drug_genes))]\n\n\n # Get the GO terms for the drug targets\n drug_gene_list = list(map(int,drug_gene_list))\n drug_targets = GO_API.get_GO_terms(drug_gene_list)\n\n # Get GO Enrichment statistics then saving those to csv\n print(\"Getting Go Enrichment statistics\")\n if options.gen_image:\n go_result = GO_API.calc_GO_enrichment(dis_gene_list, os.path.join(results_dir, out_name), target_list=drug_targets, gen_image=True)\n else:\n go_result = GO_API.calc_GO_enrichment(dis_gene_list, \"\",target_list=drug_targets)\n\n go_result['gene_target'] = go_result['term'].isin(drug_targets)\n\n go_result = go_result.loc[go_result['rejected'] == 1.0, ['name', 'term', 'p', 'q', 'gene_target']]\n go_result = go_result.sort_values(by=['gene_target', 'q'], ascending=[False, True])\n go_result.to_csv(os.path.join(results_dir, out_name + \"_GO_pathway_enrichment.csv\"), mode=\"w+\", index_label=False, index=False)\n \n\n # Get GO Enrichment statistics\n go_result_short = go_result[:min(5, len(go_result))]\n\n # Start saving results\n result = {\"GOENRICH\":go_result, \"drug_genes\":drug_genes, \"disease_genes\":dis_genes, \"drug_id\": drug_id, \"disease_id\": disease_id,\n \"GOENRICH_short\":go_result_short, \"drug_genes_short\":drug_genes_short, \"disease_genes_short\":dis_genes_short,\n }\n \n\n # Get tissue information\n print(\"Getting tissue information resolved to disease via: resolve_EntrezGeneID_to_NCBIGeneName\")\n tissue_df_dis = TiGER_api.get_tissue_counts([GNBR_api.resolve_EntrezGeneID_to_NCBIGeneName(str(x)) for x in dis_gene_list])\n if tissue_df_dis is not None:\n tissue_df_dis_short = tissue_df_dis[:min(5, len(tissue_df_dis))]\n result[\"tissue_df_dis\"] = tissue_df_dis\n result[\"tissue_df_dis_short\"] = tissue_df_dis_short\n\n # Generate image\n print('Generating Image')\n if options.gen_image:\n file_name = os.path.join(results_dir, out_name + '.png')\n if os.path.exists(os.path.join(results_dir, out_name + '.dot')):\n subprocess.check_call(['dot', '-Tpng', os.path.join(results_dir, out_name + '.dot'), '-o', file_name])\n result[\"image_file\"] = file_name\n\n # Get Pubmed id, then get top 10 publication titles \n print(\"Getting 
Pubmed IDs and Titles\")\n if options.gen_pubmed and QDisease is not None:\n PMIDs = GNBR_api.query_chemical_disease(drug, disease, get_PMIDs=True)\n if len(PMIDs) > 0:\n PMID_df = pd.DataFrame([[x, get_PMID(x)] for x in PMIDs[:min(10, len(PMIDs))]], columns=[\"PMIDS\", \"Title\"])\n \n # will show top 5\n PMID_df_short = PMID_df[:min(5, len(PMID_df))]\n result[\"pubmed\"] = PMID_df\n \n result[\"pubmed_short\"] = PMID_df_short\n\n print('Saving pubmed PMIDs to ', results_dir, out_name,\"_PMIDs.csv\")\n PMID_df.to_csv(os.path.join(results_dir, out_name + \"_PMIDs.csv\"), mode=\"w+\",\n index_label=False, index=False, header=False)\n \n else:\n result[\"pubmed\"] = 'no PMIDs found'\n\n return(result)",
"def find_qreg_name(circuit_qasm: str) -> str:\n for line in circuit_qasm.splitlines():\n if line[0:5] == \"qreg \":\n qreg_name = \"\"\n for i in range(5,len(line)):\n if line[i] == \"[\" or line[i] == \";\":\n break\n elif line[i] != \" \":\n qreg_name += line[i]\n return qreg_name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract MURK aerosol and calculate RH for each of the sites in the ceil metadata. Can retain the full forecast, or just for the day
|
def mod_site_extract_calc(day, ceil_metadata, modDatadir, model_type, res, ceil_lam,
                          fullForecast=False, Z=21, allvars=False, m_coeff=1.0, rh_coeff=1.0,
                          version=FOcon.aerFO_version, **kwargs):

    # if 'nan_policy' in kwargs.keys():

    def calc_RH(mod_T_celsius, mod_q, mod_r_v, mod_p):
        """
        Calculate relative humidity
        (Thermal Physics of the Atmosphere - Maarten's book).
        """
        # -----------
        # saturated vapour pressure (hPa, then Pa) - Teten's eq 5.18, pp. 98
        e_s_hpa = 6.112 * (np.exp((17.67 * mod_T_celsius) / (mod_T_celsius + 243.5)))
        e_s = e_s_hpa * 100
        # mass mixing ratio of water vapour pp. 100
        # now calculated outside function for use in water vapour absorption coefficient
        # r_v = mod_q / (1 - mod_q)
        # mass mixing ratio of water vapour at saturation eq 5.22, pp. 100
        r_vs = 0.622 * (e_s / mod_p)
        # relative humidity (variant of eq 5.24, pp 101)
        # rescale rh if requested
        mod_rh = mod_r_v / r_vs
        return mod_rh
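
    # For reference, calc_RH above is a Tetens-type formulation; in symbols (a sketch of the
    # same calculation, with pressure p in Pa and temperature T in deg C):
    #     e_s  = 611.2 * exp(17.67 * T / (T + 243.5))   # saturation vapour pressure [Pa]
    #     r_vs = 0.622 * e_s / p                        # saturation mixing ratio [kg kg-1]
    #     RH   = r_v / r_vs                             # relative humidity [fraction 0-1]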
    # Read in the modelled data for London
    mod_all_data = read_all_mod_data(modDatadir, day, Z)

    # define mod_data array
    mod_data = {}

    for site, loc in ceil_metadata.iteritems():

        # define dictionary for the site
        mod_data[site] = {}

        # get the lon and lat idx for the instrument
        idx_lon, idx_lat, _, _ = get_site_loc_idx_in_mod(mod_all_data, loc, model_type, res)

        # Time extraction - pull out just the main day's data or the full forecast?
        if fullForecast == False:
            # only extract data for the main day
            range_time = get_time_idx_forecast(mod_all_data, day)
        else:
            range_time = np.arange(len(mod_all_data['time']))

        # extract the variables for that location
        # rescale m by m_coeff. m_coeff = 1.0 by default so normally it is not rescaled
        mod_aer = mod_all_data['aerosol_for_visibility'][range_time, :, idx_lat, idx_lon] * m_coeff
        mod_q = mod_all_data['specific_humidity'][range_time, :, idx_lat, idx_lon]
        mod_p = mod_all_data['air_pressure'][range_time, :, idx_lat, idx_lon]
        mod_T = mod_all_data['air_temperature'][range_time, :, idx_lat, idx_lon]
        mod_h = mod_all_data['level_height']
        mod_time = np.array(mod_all_data['time'])[range_time]  # should probably be done in the eu.netCDF_read function
        mod_u = mod_all_data['x_wind'][range_time, :, idx_lat, idx_lon]
        mod_v = mod_all_data['y_wind'][range_time, :, idx_lat, idx_lon]
        mod_w = mod_all_data['upward_air_velocity'][range_time, :, idx_lat, idx_lon]

        # extract Q_H (sensible heat flux) if it is there
        if 'boundary_layer_sensible_heat_flux' in mod_all_data:
            mod_Q_H = mod_all_data['boundary_layer_sensible_heat_flux'][range_time, :, idx_lat, idx_lon]

        # Calculate some variables from those read in
        # convert temperature to degrees C
        mod_T_celsius = mod_T - 273.15

        # calculate water vapour mixing ratio [kg kg-1]: page 100 - Thermal physics of the atmosphere
        mod_r_v = mod_q / (1 - mod_q)

        # calculate virtual temperature eq. 1.31 (Ambaum notes); uses the specific gas constant for dry air
        mod_Tv = (1 + (0.61 * mod_q)) * mod_T

        # density of air [kg m-3]  # ToDo gas constant is for dry air, should be for moist (small difference)
        mod_rho = mod_p / (286.9 * mod_Tv)

        # calculate RH [fraction 0-1] from temp [degC], specific humidity (q) [kg kg-1] ...
        # and water vapour mixing ratio (r) [kg kg-1]
        # Temp [degC] for use in the empirical equation
        # scale RH by rh_coeff. Default = 1.0 (no scaling)
        mod_rh_frac = calc_RH(mod_T_celsius, mod_q, mod_r_v, mod_p) * rh_coeff
        # Replace variables with observations?
        # Use NWP model or observed RH?
        if 'obs_RH' not in kwargs:
            rh_frac = mod_rh_frac
            r_v = mod_r_v
        else:
            if kwargs['obs_RH'] == True:
                # Read in observed RH data to take the place of model RH data
                wxt_obs = read_wxt_obs(day, mod_time, mod_rh_frac)
                obs_rh_frac = wxt_obs['RH_frac']
                rh_frac = obs_rh_frac
                r_v = wxt_obs['r_v']
            else:
                raise ValueError('obs_RH not set to True or False! Value must be a boolean')

        # process forward modelled backscatter for each site
        FO_dict = forward_operator(mod_aer, rh_frac, r_v, mod_rho, mod_h, ceil_lam, version, mod_time, **kwargs)

        # store MURK aerosol, RH and heights in mod_data dictionary
        mod_data[site]['RH'] = rh_frac
        mod_data[site]['aerosol_for_visibility'] = mod_aer
        mod_data[site]['level_height'] = mod_h
        mod_data[site]['time'] = mod_time

        # check whether to return all the prognostic variables too
        # returns all variables, not just the main ones like attenuated backscatter, RH, time and height
        if allvars == True:

            # add all the vars in FO_dict to the mod_data dictionary
            mod_data[site].update(FO_dict)

            # add the original UKV vars into mod_data
            mod_data[site]['specific_humidity'] = mod_q
            mod_data[site]['air_temperature'] = mod_T
            mod_data[site]['air_pressure'] = mod_p

            # wind variables too
            mod_data[site]['u_wind'] = mod_u
            mod_data[site]['v_wind'] = mod_v
            mod_data[site]['w_wind'] = mod_w

            # calculate murk concentration in air (from [kg kg-1 of air] to [kg m-3 of air])
            # kg m-3_air = kg kg-1_air * kg_air m-3_air
            mod_aer_conc = mod_aer * mod_rho

            mod_data[site]['virtual_temperature'] = mod_Tv
            mod_data[site]['air_density'] = mod_rho
            mod_data[site]['aerosol_concentration_dry_air'] = mod_aer_conc

            # if Q_H is in data, extract it
            if 'boundary_layer_sensible_heat_flux' in mod_all_data:
                mod_data[site]['Q_H'] = mod_Q_H

    return mod_data
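
# A minimal usage sketch (assumptions: the aerFO helpers this function calls, e.g.
# read_all_mod_data() and forward_operator(), are importable from the same codebase; the site
# name, coordinates, data directory and resolution string below are purely illustrative):
#
#     import datetime as dt
#     ceil_metadata = {'CL31-A_IMU': (-0.106, 51.511)}   # site -> (lon, lat), hypothetical
#     day = dt.datetime(2016, 5, 4)
#     mod_data = mod_site_extract_calc(day, ceil_metadata, '/data/UKV/', 'UKV', res='1p5km',
#                                      ceil_lam=905, fullForecast=False, allvars=True)
#     rh = mod_data['CL31-A_IMU']['RH']                  # RH fraction [0-1], shape (time, height)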
|
[
"def calc_Q_ext_wet(ceil_lam, r_md, RH):\n\n from ellUtils import nearest\n\n def read_f_RH(ceil_lam):\n \"\"\"\n Read in the f_RH data from csv\n EW 21/02/17\n\n :param filename:\n :return: data = {RH:... f_RH:...}\n\n filename must be in the form of 'calculated_ext_f(RH)_[ceil_lambda]nm.csv'\n \"\"\"\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH\n\n def read_Q_dry_ext(ceil_lam):\n \"\"\"\n Read in the Q_ext for dry murk.\n EW 21/02/17\n\n :param filename:\n :param lam:\n :return: Q_ext_dry = {radius:... Q_ext_dry:...}\n\n Requres the wavelength to be passed, just so in the future, the 910 nm file is not incorrectly used by mistake when\n it should use the file for another wavelength.\n \"\"\"\n\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n filename = 'calculated_Q_ext_' + str(ceil_lam) + 'nm.csv'\n\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n Q_ext_dry = {'radius': raw[:, 0],\n 'Q_ext': raw[:, 1]}\n\n return Q_ext_dry\n\n RH_factor = 0.01 # Relative Humidity in 0.38 not 38%\n\n # calculate Q_ext_wet\n f_RH = read_f_RH(ceil_lam)\n Q_ext_dry = read_Q_dry_ext(ceil_lam)\n\n # create matric of Q_ext_dry based on r_md\n Q_ext_dry_matrix = np.empty(r_md.shape)\n f_RH_matrix = np.empty(RH.shape)\n\n # find Q_ext dry, given the dry radius matrix\n if r_md.size != 1:\n for i in range(r_md.shape[0]):\n idx = nearest(Q_ext_dry['radius'], r_md[i])[1]\n Q_ext_dry_matrix[i] = Q_ext_dry['Q_ext'][idx]\n\n else:\n idx = nearest(Q_ext_dry['radius'], r_md)[1]\n Q_ext_dry_matrix = Q_ext_dry['Q_ext'][idx]\n\n # find f(RH), given the RH matrix\n # need RH factor as f_RH['RH'] in units of frac not percentage\n if RH.size != 1:\n for i in range(RH.shape[0]):\n idx = nearest(f_RH['RH'], RH_factor * RH[i])[1]\n f_RH_matrix[i] = f_RH['f_RH'][idx]\n else:\n idx = nearest(f_RH['RH'], RH_factor * RH)[1]\n f_RH_matrix = f_RH['f_RH'][idx]\n\n # calculate Q_ext_wet\n Q = Q_ext_dry_matrix * f_RH_matrix\n # print np.mean(Q_ext_dry_matrix[:,:20])\n\n return Q, Q_ext_dry_matrix, f_RH_matrix",
"def read_f_RH(ceil_lam):\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH",
"def read_f_RH(mod_time, ceil_lam):\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])\n return f_RH",
"def ELRscript(model,mon,fday,fyr,day1,day2,nday,hdate_last,lit,liti,wk,nla1,sla1,wlo1,elo1,nla2,sla2,wlo2,elo2,fprefix,mpref,training_season,ntrain,rainfall_frequency,MOS):\n\n#%% model Hindcasts \n\tfh_xh = Dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc', mode='r')\n\tfh_yh = Dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc', mode='r')\n\n\tlons = fh_xh.variables['X'][:]\n\tlats = fh_xh.variables['Y'][:]\n\n\tx = fh_xh.variables['tp'][:]; x = np.squeeze(x)\n\ty = fh_yh.variables['tp'][:]\n\tndat1, nlat, nlon = np.shape(x)\n\tx1=x[:,1,1]\n\tI = np.where(x1>10000)\n\tbad_value_num=len(x1[I])\n\tndat=ndat1-bad_value_num\n\n#%% ELR: Train the models\n# Make a dictionary to contain the 'LogisticRegression' objects and terciles\n\telr_dict = {} # create an empty dictionary\n\telr_climo_dict = {} # create an empty dictionary for the climo forecast\n\n\tym = np.mean(y,axis=0)\n\tmsk = ma.getmask(ym)\n\tindex_land = np.empty((nlat,nlon),dtype=int)\n\txm0 = x\n\t#xm = xm0[0:int(ndat/2),:,:]\n\txm = xm0[0:lit,:,:]\n\n\tx0 = np.zeros(np.shape(xm)) # array of zeros to construct the climo forecast\n\tijland = -1\n\tfor j in range(nlat):\n\t# print(\"in j loop, j=\", j)\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\tindex_land[j,i] = ijland # index of land points\n\t\t\t\t#elr_dict[ijland] = elr_fit(xm[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\t#elr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\telr_dict[ijland] = elr_fit(xm[:,j,i], y[0:lit,j,i])\n\t\t\t\telr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:lit,j,i])\n\t\t\t# ijland is the dictionary key that can be used to assess the entries, like this\n\t\t\t# mymodel, mytercs = mydict[0]\n\t\t\t# mymodel.coef_\n\tnland = ijland+1\n\t#print('ELR training done with total landpoints = ',nland)\n\n\t#%% Make set of ELR in-sample hindcasts (no XV)\n\t#elr_hc = np.empty((ndat,nlat,nlon,3)); elr_hc.fill(np.nan)\n\t#elr_hc = np.empty((int(ndat/2),nlat,nlon)); elr_hc.fill(np.nan)\n\telr_hc = np.empty((lit,nlat,nlon)); elr_hc.fill(np.nan)\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\t#elr_hc[:,j,i,:] = elr_tercilesPredict(xm[:,j,i], terciles, elrmodel)\n\t\t\t\telr_hc[:,j,i] = elr_quantilePredict(xm[:,j,i], elrmodel)\n\n# ijland = index_land[lat1, lon1]\n# elrmodel, terciles = elr_dict[ijland]\n# elrmodel_climo, terciles = elr_climo_dict[ijland]\n# poe, q_fcst, q_clim, = elr_poe( xm[idat,lat1,lon1], elrmodel, elrmodel_climo )\n# plt.figure()\n\n\t#print('Set of ELR hindcasts made on a map of xy gridpoints')\n#---------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\t#T=int(ndat/2)\n\tT=lit\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(elr_hc, axis=1)\n\tvar[np.isnan(var)]=-999. 
#use CPT missing value\n\tdss=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tTarr=np.empty(ndat,dtype=int)\n\tfor it in range(ndat):\n\t\tTarr[it]=1901+it\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(y[0:lit,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tT1=lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\tndat_fc = ndat-lit\n\txf = x[lit:ndat,:,:]\n\tyf = y[lit:ndat,:,:]\n\n#%% Verification period\n########################################\n\n\telr_fc = np.empty((ndat_fc,nlat,nlon,3)); elr_fc.fill(np.nan)\n\trpss_ELR_fc = np.ma.array(np.empty((nlat,nlon)), mask=msk, fill_value=np.nan)\n\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\telr_fc[:,j,i,:] = elr_tercilesPredict(xf[:,j,i], terciles, elrmodel)\n\t#print('Set of ELR forcasts made on a map of xy gridpoints')\n\n#----------------------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_verification.txt'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tf.write(\"cpt:ncats=3\\n\")\n\tW=nlon\n\tH=nlat\n\tds=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\tT=ndat-lit\n\tTarr1=Tarr[lit:]\n\tXarr=lons\n\tYarr1=lats\n\tYarr=Yarr1[::-1] #Y should from N to S\n\tvari='tp'\n\tvar=np.flip(elr_fc, axis=1)*100\n\tvar[np.isnan(var)]=-1.0 #use CPT missing value\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:C=1, cpt:clim_prob=0.33333333333300003, cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=probability (%), cpt:missing=-1.0000000000000000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,0]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=2, 
cpt:clim_prob=0.33333333333400000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,1]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=3, cpt:clim_prob=0.33333333333299997\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,2]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_verification.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\t#var=np.flip(y[int(ndat/2):,:,:], axis=1)\n\tvar=np.flip(y[lit:,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\t#T1=int(ndat/2)\n\tT1=ndat-lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()",
"def read_hourly_f_RH(mod_time, ceil_lam):\n\n import sys.platform as platform\n\n # file name and path\n if platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename)\n return f_RH",
"def get_asr(model, region):\n if INCLUDE_SHOCK: \n ind0=0\n else:\n ind0 = 96*2 # exclude first two days\n if model.lower()==\"nicam\":\n if region.lower()==\"twp\":\n swd = xr.open_dataset(ap.TWP_NICAM_SWD)['ss_swd_toa']\n swu = xr.open_dataset(ap.TWP_NICAM_SWU)['ss_swu_toa']\n elif region.lower()==\"nau\":\n swd = xr.open_dataset(ap.NAU_NICAM_SWD)['ss_swd_toa']\n swu = xr.open_dataset(ap.NAU_NICAM_SWU)['ss_swu_toa']\n elif region.lower()==\"shl\":\n swd = xr.open_dataset(ap.SHL_NICAM_SWD)['ss_swd_toa']\n swu = xr.open_dataset(ap.SHL_NICAM_SWU)['ss_swu_toa']\n asr = swd - swu\n del swd, swu\n elif model.lower()==\"fv3\":\n if region.lower()==\"twp\":\n swd = xr.open_dataset(ap.TWP_FV3_SWD)['fsdt']\n swu = xr.open_dataset(ap.TWP_FV3_SWU)['fsut']\n elif region.lower()==\"nau\":\n swd = xr.open_dataset(ap.NAU_FV3_SWD)['fsdt']\n swu = xr.open_dataset(ap.NAU_FV3_SWU)['fsut']\n elif region.lower()==\"shl\":\n swd = xr.open_dataset(ap.SHL_FV3_SWD)['fsdt']\n swu = xr.open_dataset(ap.NAU_FV3_SWU)['fsut']\n asr = swd - swu\n del swd, swu\n elif model.lower()==\"icon\":\n if region.lower()==\"twp\":\n asr = xr.open_dataset(ap.TWP_ICON_SWN)['ASOB_T']\n else:\n if region.lower()==\"nau\":\n rad = xr.open_dataset(ap.NAU_ICON_RAD)\n elif region.lower()==\"shl\":\n rad = xr.open_dataset(ap.SHL_ICON_RAD)\n swn = reshape.reshape(\"ASOB_T\", rad, dim=2)\n swn_un = util.undomean(swn, xy=False)\n asr = xr.DataArray(swn_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":swn.t.values, \"cell\":swn.cell})\n del swn, swn_un, rad\n elif model.lower()==\"sam\":\n print('sam')\n if region.lower()==\"twp\":\n asr = xr.open_dataset(ap.TWP_SAM_SWN)['SWNTA']\n elif region.lower()==\"nau\":\n asr = xr.open_dataset(ap.NAU_SAM_SWN)['SWNTA']\n elif region.lower()==\"shl\":\n asr = xr.open_dataset(ap.SHL_SAM_SWN)['SWNTA']\n print(asr.shape)\n else: raise Exception(\"Invalid Model %s; try NICAM, FV3, ICON, SAM.\")\n return asr[ind0:]",
"def calc_r_md_species(r_d_microns, met, aer_i):\n\n\n # calulate r_md based on Fitzgerald (1975) eqn 8 - 10\n def calc_r_md_t(r_d_microns, rh_i, alpha_factor):\n\n \"\"\"\n Calculate r_md for a single value of rh (rh_i) at a time t (alpha and beta will be applied to all rbins)\n :param rh_i:\n :param r_d_microns: NOt the duplicated array!\n :return: r_md_i\n\n\n The r_md calculated here will be for a fixed RH, therefore the single row of r_d_microns will be fine, as it\n will compute a single set of r_md as a result.\n \"\"\"\n\n beta = np.exp((0.00077 * rh_i) / (1.009 - rh_i))\n if rh_i < 0.97:\n phi = 1.058 - ((0.0155 * (rh_i - 0.97))\n / (1.02 - (rh_i ** 1.4)))\n else:\n phi = 1.058\n alpha = 1.2 * np.exp((0.066 * rh_i) / (phi - rh_i))\n\n # alpha factor comes from the Table 1 in Fitzgerald (1975) to be used with some other aerosol types\n r_md_t = alpha_factor * alpha * (r_d_microns ** beta)\n\n return r_md_t\n\n\n\n # duplicate the range of radii to multiple rows, one for each RH - shape(time, rbin).\n # Remember: the number in each diameter bin might change, but the bin diameters themselves will not.\n # Therefore this approach works for constant and time varying number distirbutions.\n r_d_microns_dup = np.tile(r_d_microns, (len(met['time']), 1))\n\n # Set up array for aerosol\n r_md = np.empty(len(met['time']))\n r_md[:] = np.nan\n\n phi = np.empty(len(met['time']))\n phi[:] = np.nan\n\n # limits for what approach to use, depending on the RH\n # from the CLASSIC guidence, follows Fitzgerald (1975)\n if aer_i == '(NH4)2SO4':\n rh_cap = 0.995 # calculate r_md specifically for the upper limit (considered max rh)\n rh_del = 0.81 # calculate r_md specifically for the upper limit (start of empirical formula)\n # CLASSIC does linear interpolation bettween rh_del and rh_eff.\n rh_eff = 0.3 # efflorescence (below is dry)\n alpha_factor = 1.0 # a coefficient for alpha, which is specific for different aerosol types\n elif aer_i == 'NH4NO3':\n rh_cap = 0.995\n rh_del = 0.61\n rh_eff = 0.3\n alpha_factor = 1.06\n\n elif aer_i == 'NaCl':\n rh_cap = 0.995\n rh_del = 0.75\n rh_eff = 0.42\n alpha_factor = 1.35\n\n # --------------------------------------------\n # Calculate r_md for the species, given RH\n # -----------------------------------------------\n\n # empirical relationships fitted for radius in micrometers, not meters (according to CLASSIC guidance).\n\n # --- delequescence - rh cap (defined as 0.995. Above this empirical relationship breaks down) --- #\n\n # Currently just calculates it for all, then gets overwritten lower down, depending on their RH (e.g. below eff)\n # ToDo use the rh_bet_del_cap to only calc for those within the del - cap range.\n\n # # between deliquescence and rh_cap (set at 0.995 for all)\n # bool = np.logical_and(WXT['RH_frac'] >= rh_del, WXT['RH_frac'] <= rh_cap)\n # rh_bet_del_cap = np.where(bool == True)[0]\n\n beta = np.exp((0.00077 * met['RH_frac'])/(1.009 - met['RH_frac']))\n rh_lt_97 = met['RH_frac'] < 0.97\n phi[rh_lt_97] = 1.058\n phi[~rh_lt_97] = 1.058 - ((0.0155 * (met['RH_frac'][~rh_lt_97] - 0.97))\n /(1.02 - (met['RH_frac'][~rh_lt_97] ** 1.4)))\n alpha = 1.2 * np.exp((0.066 * met['RH_frac'])/ (phi - met['RH_frac']))\n\n # duplicate values across to all radii bins to help r_md = .. 
calculation: alpha_dup.shape = (time, rbin)\n alpha_dup = np.tile(alpha, (len(r_d_microns), 1)).transpose()\n beta_dup = np.tile(beta, (len(r_d_microns), 1)).transpose()\n\n r_md = alpha_factor * alpha_dup * (r_d_microns_dup ** beta_dup)\n\n # --- above rh_cap ------#\n\n # set all r_md(RH>99.5%) to r_md(RH=99.5%) to prevent growth rates inconsistent with impirical equation.\n # replace all r_md values above 0.995 with 0.995\n rh_gt_cap = met['RH_frac'] > rh_cap\n r_md[rh_gt_cap, :] = calc_r_md_t(r_d_microns, rh_cap, alpha_factor)\n\n # --- 0 to efflorescence --- #\n\n # below efflorescence point (0.3 for sulhate, r_md = r_d)\n rh_lt_eff = met['RH_frac'] <= rh_eff\n r_md[rh_lt_eff, :] = r_d_microns\n\n # ------ efflorescence to deliquescence ----------#\n\n # calculate r_md for the deliquescence rh - used in linear interpolation\n r_md_del = calc_r_md_t(r_d_microns, rh_del, alpha_factor)\n\n # all values that need to have some linear interpolation\n bool = np.logical_and(met['RH_frac'] >= rh_eff, met['RH_frac'] <= rh_del)\n rh_bet_eff_del = np.where(bool == True)[0]\n\n # between efflorescence point and deliquescence point, r_md is expected to value linearly between the two\n low_rh = rh_eff\n up_rh = rh_del\n low_r_md = r_d_microns\n up_r_md = r_md_del\n\n diff_rh = up_rh - low_rh\n diff_r_md = r_md_del - r_d_microns\n abs_diff_r_md = abs(diff_r_md)\n\n # find distance rh is along linear interpolation [fraction] from lower limit\n # frac = np.empty(len(r_md))\n # frac[:] = np.nan\n frac = ((met['RH_frac'][rh_bet_eff_del] - low_rh) / diff_rh)\n\n # duplicate abs_diff_r_md by the number of instances needing to be interpolated - helps the calculation below\n # of r_md = ...low + (frac * abs diff)\n abs_diff_r_md_dup = np.tile(abs_diff_r_md, (len(rh_bet_eff_del), 1))\n frac_dup = np.tile(frac, (len(r_d_microns), 1)).transpose()\n\n # calculate interpolated values for r_md\n r_md[rh_bet_eff_del, :] = low_r_md + (frac_dup * abs_diff_r_md_dup)\n\n return r_md",
"def calculate_erosivity():\n def erosivity_op(dem, precip):\n \"\"\"Calculate erovisity from elevation and annual precipitation.\n\n Parameters:\n dem (numpy.ndarray): elevation in m above sea level\n precip (numpy.ndarray): average annual precip in mm\n\n Returns:\n rainfall erosivity, MJ mm per ha per h per year\n\n \"\"\"\n lon_val = -76.26 # watershed centroid longitude\n lat_val = -10.17 # watershed centroid latitude\n # regression coefficients published by Riquetti et al. 2020\n b0 = 0.27530\n b1 = 0.02266\n b2 = -0.00017067\n b3 = 0.65773\n b4 = 6.0497e-08\n valid_mask = (\n (~numpy.isclose(dem, dem_nodata)) &\n (~numpy.isclose(precip, input_nodata)))\n\n log_R = numpy.empty(dem.shape, dtype=numpy.float32)\n log_R[:] = input_nodata\n log_R[valid_mask] = (\n b0 + b1 * lon_val + b2 * (lon_val * lat_val) + b3 *\n numpy.log(precip[valid_mask]) + b4 * dem[valid_mask] *\n precip[valid_mask])\n\n erosivity = numpy.empty(dem.shape, dtype=numpy.float32)\n erosivity[:] = input_nodata\n erosivity[valid_mask] = 10**log_R[valid_mask]\n return erosivity\n\n def simple_erosivity_op(precip):\n \"\"\"Calculate erosivity as annual precip * 2.2.\"\"\"\n valid_mask = (~numpy.isclose(precip, input_nodata))\n result = numpy.empty(precip.shape, dtype=numpy.float32)\n result[:] = input_nodata\n result[valid_mask] = precip[valid_mask] * 2.2\n return result\n\n dem_path = \"C:/Users/ginge/Dropbox/NatCap_backup/Moore_Amazon/SDR_SWY_data_inputs/projected/HydroSHEDS_CON_Chaglla_UTM18S.tif\"\n target_pixel_size = pygeoprocessing.get_raster_info(dem_path)['pixel_size']\n dem_nodata = pygeoprocessing.get_raster_info(dem_path)['nodata'][0]\n intermediate_dir = tempfile.mkdtemp()\n precip_dir = \"F:/Moore_Amazon_backups/precipitation\"\n out_dir = \"F:/Moore_Amazon_backups/precipitation/erosivity_Riquetti\"\n # out_dir = \"F:/Moore_Amazon_backups/precipitation/erosivity_simple\"\n for year in ['50', '70']: # year after 2000\n for rcp in [2.6, 6.0, 8.5]: # RCP\n path_list = [os.path.join(\n precip_dir, 'year_20{}'.format(year), 'rcp_{}'.format(rcp),\n \"mi{}pr{}{}.tif\".format(int(rcp * 10), year, m)) for m in\n range(1, 13)]\n input_nodata = pygeoprocessing.get_raster_info(\n path_list[0])['nodata'][0]\n annual_precip_path = os.path.join(intermediate_dir, 'annual.tif')\n raster_list_sum(\n path_list, input_nodata, annual_precip_path, input_nodata)\n\n # align annual precipitation with DEM\n aligned_annual_precip_path = os.path.join(\n intermediate_dir, 'aligned_annual_precip.tif')\n aligned_dem_path = os.path.join(\n intermediate_dir, 'aligned_dem.tif')\n target_raster_path_list = [\n aligned_annual_precip_path, aligned_dem_path]\n pygeoprocessing.align_and_resize_raster_stack(\n [annual_precip_path, dem_path], target_raster_path_list,\n ['near'] * len(target_raster_path_list), target_pixel_size,\n 'intersection')\n\n out_path = os.path.join(\n out_dir, 'erosivity_year{}_rcp{}.tif'.format(year, rcp))\n pygeoprocessing.raster_calculator(\n [(aligned_dem_path, 1), (aligned_annual_precip_path, 1)],\n erosivity_op, out_path, gdal.GDT_Float32, input_nodata)\n\n # current\n current_dir = \"F:/Moore_Amazon_backups/precipitation/current\"\n path_list = [\n os.path.join(current_dir, 'wc2.1_5m_prec_{}.tif'.format(m)) for m\n in range(1, 13)]\n input_nodata = pygeoprocessing.get_raster_info(\n path_list[0])['nodata'][0]\n annual_precip_path = os.path.join(intermediate_dir, 'annual.tif')\n raster_list_sum(\n path_list, input_nodata, annual_precip_path, input_nodata)\n\n # align annual precipitation with DEM\n 
aligned_annual_precip_path = os.path.join(\n intermediate_dir, 'aligned_annual_precip.tif')\n aligned_dem_path = os.path.join(\n intermediate_dir, 'aligned_dem.tif')\n target_raster_path_list = [\n aligned_annual_precip_path, aligned_dem_path]\n pygeoprocessing.align_and_resize_raster_stack(\n [annual_precip_path, dem_path], target_raster_path_list,\n ['near'] * len(target_raster_path_list), target_pixel_size,\n 'intersection')\n\n out_path = os.path.join(out_dir, 'erosivity_current.tif')\n pygeoprocessing.raster_calculator(\n [(aligned_dem_path, 1), (aligned_annual_precip_path, 1)],\n erosivity_op, out_path, gdal.GDT_Float32, input_nodata)",
"def get_forecast_for_tomorrow(data):\n description = None\n precis = None\n temperature_min = None\n temperature_max = None\n\n forecasts = []\n chunks = data.split(\"\\n\\n\")\n for i, chunk in enumerate(chunks):\n if chunk.startswith(\"Forecast for \"):\n forecasts.append(i)\n\n TwoForecastsPresent = len(forecasts) > 1\n\n if TwoForecastsPresent:\n\n # typically the forecast for tomorrow spans two chunks. The first\n # contains the description and the second contains the precis and\n # temperature.\n tomorrow_forecast_index = forecasts[1]\n tomorrowsForecast = chunks[tomorrow_forecast_index]\n\n description = tomorrowsForecast.split(\"\\n\", 1)[0]\n description = description.replace(\"Forecast for \", \"\")\n description = description.strip()\n\n content = tomorrowsForecast.split(\"\\n\")[1]\n content = content.strip()\n # prefer the longer description over the shorter precis\n precis = content\n\n # the temperatures for tomorrow's forecast appears to always be in\n # the following block.\n tomorrow_details = chunks[tomorrow_forecast_index + 1]\n\n if tomorrow_details.startswith('Precis'):\n lines = tomorrow_details.split(\"\\n\")\n precis_line = lines[0]\n\n if precis_line.startswith(\"Precis\"):\n precis = precis_line.replace(\"Precis\", \"\")\n precis = precis.replace(\":\", \"\")\n precis = precis.strip()\n if precis.endswith(\".\"):\n precis = precis[:-1]\n\n # temp typically follows the precis line, but not always\n if len(lines) > 1:\n temp_line = lines[1]\n items = temp_line.split(\" \")\n items = filter(None, items) # remove empty items\n\n if len(items) == 3:\n _, temperature_min, temperature_max = items\n elif len(items) == 2:\n _, temperature_max = items\n\n if temperature_min:\n temperature_min = temperature_min.replace(\"Min\", \"\")\n temperature_min = temperature_min.strip()\n\n if temperature_max:\n temperature_max = temperature_max.replace(\"Max\", \"\")\n temperature_max = temperature_max.strip()\n # temp appears to alway be last item on line\n temp_line = temp_line.strip()\n _temperature = temp_line.split()[-1]\n\n else:\n\n forecast_line = tomorrow_details.split(\"\\n\")[0]\n items = forecast_line.split(\" \")\n items = filter(None, items) # remove empty items\n try:\n location, _, temperature_min, temperature_max = items\n\n temperature_min = temperature_min.replace(\"Min\", \"\")\n temperature_min = temperature_min.strip()\n\n temperature_max = temperature_max.replace(\"Max\", \"\")\n temperature_max = temperature_max.strip()\n\n except ValueError, ex:\n logging.error(\"Error extracting 4 items from line: \\'%s\\'. items=%s\" % (forecast_line, str(items)))\n logging.exception(ex)\n\n else:\n # try one of the other formats which looks like this:\n # Sunday Fine, partly cloudy. Min 12 Max 24\n # Monday A few showers. Min 13 Max 23\n # Tuesday A few showers. Min 14 Max 23\n # Wednesday A few showers. Min 13 Max 24\n # Thursday A few showers. 
Min 15 Max 25\n # Friday Showers.\n #\n # This block format seems to always follow the UV Alert block\n tomorrow_forecast_index = None\n for i, chunk in enumerate(chunks):\n # typically the chunk starts with UV Alert but sometimes it\n # can be bunched up with the chunk before.\n if \"UV Alert\" in chunk:\n tomorrow_forecast_index = i + 1\n break\n\n if tomorrow_forecast_index is not None:\n tomorrowsForecast = chunks[tomorrow_forecast_index]\n forecast_line = tomorrowsForecast.split(\"\\n\")[0]\n\n items = forecast_line.split(\" \")\n items = filter(None, items) # remove empty items\n description, precis, temperature_min, temperature_max = items\n\n description = description.strip()\n\n precis = precis.strip()\n if precis.endswith(\".\"):\n precis = precis[:-1]\n\n temperature_min = temperature_min.replace(\"Min\", \"\")\n temperature_min = temperature_min.strip()\n\n temperature_max = temperature_max.replace(\"Max\", \"\")\n temperature_max = temperature_max.strip()\n\n return (description, precis, temperature_min, temperature_max)",
"def calc_Q_ext_wet(ceil_lam, r_d, r_g, rh_frac, mod_time):\n import sys\n if sys.platform == 'linux2':\n sys.path.append('/net/home/mm0100/ewarren/Documents/AerosolBackMod/scripts/ellUtils') # general utils\n from ellUtils import nearest, netCDF_read, binary_search_nearest\n else:\n from ellUtils.ellUtils import nearest, netCDF_read, binary_search_nearest\n\n\n # Reading functions\n def read_f_RH(mod_time, ceil_lam):\n\n \"\"\"\n Read in the f_RH data from netCDF file\n EW 21/02/17\n\n :param mod_time (array of datetimes) datetimes for the timesteps\n :param ceil_lam: (int) ceilometer wavelength [nm]\n :return: data = {RH:... f_RH:...}\n\n \"\"\"\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])\n return f_RH\n\n def read_Q_ext_dry(mod_time, ceil_lam):\n\n \"\"\"\n Read in the Q_ext for dry murk.\n EW 21/02/17\n\n :param mod_time (array of datetimes) datetimes for the timesteps\n :param ceil_lam: (int) ceilometer wavelength [nm]\n :return: Q_ext_dry = {radius:... Q_ext_dry:...}\n\n \"\"\"\n\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'urban_monthly_Q_ext_dry_' + str(ceil_lam) + 'nm.csv'\n\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n # format data into a dictionary\n Q_ext_dry = {'radius_m': raw[:, 0],\n 'Q_ext_dry': raw[:, 1:]} # Q_ext_dry['Q_ext_dry'].shape(radii, month)\n\n\n return Q_ext_dry\n\n # ---------------------------\n\n # cronvert geometric radius to nm to find f(RH)\n \n r_g_nm = r_g * 1.0e9\n\n # height idx range of r_d and RH\n height_idx_range = r_d.shape[1]\n\n # read in Q_ext_dry and f(RH) look up tables\n f_RH = read_f_RH(mod_time, ceil_lam) # ['f_RH MURK'].shape(month, radii, RH)\n Q_ext_dry = read_Q_ext_dry(mod_time, ceil_lam) #.shape(radii, month)\n\n # create matric of Q_ext_dry based on r_d\n Q_ext_dry_matrix = np.empty(r_d.shape)\n Q_ext_dry_matrix[:] = np.nan\n f_RH_matrix = np.empty(r_d.shape)\n f_RH_matrix[:] = np.nan\n\n # # find Q_ext dry, given the dry radius matrix\n # # find f(RH), given the RH fraction matric\n\n # loop through all elements of the array\n # idx is the full position of the element e.g. idx = (24L, 69L, 11L, 21L) - (time, height, lat, lon)\n for idx, _ in np.ndenumerate(r_d):\n\n # month idx for Q_ext_dry\n month_idx = mod_time[idx[0]].month - 1\n\n # debugging\n # if (idx[1] == 0) & (idx[2] == 0) & (idx[3] == 0):\n # print idx\n\n # Q_ext_dry - binary\n # LUT uses r_d (volume) [meters]\n r_Q_idx = binary_search_nearest(Q_ext_dry['radius_m'], r_d[idx])\n Q_ext_dry_matrix[idx] = Q_ext_dry['Q_ext_dry'][r_Q_idx, month_idx]\n\n\n # f(RH) (has it's own r_idx that is in units [nm])\n # LUT uses r_g (geometric) [nm] ToDo should change this to meters...\n r_f_RH_idx = binary_search_nearest(f_RH['radii_range'], r_g_nm[idx])\n rh_idx = binary_search_nearest(f_RH['RH'], rh_frac[idx])\n f_RH_matrix[idx] = f_RH['f(RH) MURK'][month_idx, r_f_RH_idx, rh_idx]\n\n # calculate Q_ext_wet\n Q_ext = Q_ext_dry_matrix * f_RH_matrix\n\n return Q_ext, Q_ext_dry_matrix, f_RH_matrix",
"def get_temp(model, region):\n if INCLUDE_SHOCK: \n ind0=0\n else:\n ind0 = 8*2 # exclude first two days\n if model.lower()==\"nicam\":\n if region.lower()==\"twp\":\n t = xr.open_dataset(ap.TWP_NICAM_T)[\"ms_tem\"][ind0:]\n elif region.lower()==\"shl\":\n t = xr.open_dataset(ap.SHL_NICAM_T)[\"ms_tem\"][ind0:]\n elif region.lower()==\"nau\":\n t = xr.open_dataset(ap.NAU_NICAM_T)[\"ms_tem\"][ind0:]\n else: raise Exception(\"region not valid, try SHL, NAU, or TWP\")\n elif model.lower()==\"fv3\":\n if region.lower()==\"twp\":\n t = xr.open_dataset(ap.TWP_FV3_T)[\"temp\"][ind0:]\n elif region.lower()==\"shl\":\n t = xr.open_dataset(ap.TWP_FV3_T)[\"temp\"][ind0:]\n elif region.lower()==\"nau\":\n t = xr.open_dataset(ap.TWP_FV3_T)[\"temp\"][ind0:]\n else: raise Exception(\"region not valid, try SHL, NAU, or TWP\")\n elif model.lower()==\"icon\":\n if region.lower()==\"twp\":\n t = xr.open_dataset(ap.TWP_ICON_T)[\"NEW\"][ind0:]\n elif region.lower()==\"shl\":\n t = xr.open_dataset(ap.SHL_ICON_T) #K\n t = reshape.reshape(\"T\", t, dim=3)[ind0:]\n elif region.lower()==\"nau\":\n t = xr.open_dataset(ap.NAU_ICON_T)[\"T\"][ind0:]\n else: raise Exception(\"region not valid, try SHL, NAU, or TWP\")\n elif model.lower()==\"sam\":\n if region.lower()==\"twp\":\n t = xr.open_dataset(ap.TWP_SAM_T)[\"TABS\"][ind0:,:]\n elif region.lower()==\"shl\":\n t = xr.open_dataset(ap.SHL_SAM_T)[\"TABS\"][ind0:,:]\n elif region.lower()==\"nau\":\n t = xr.open_dataset(ap.NAU_SAM_T)[\"TABS\"][ind0:,:]\n else: raise Exception(\"try valid region (SHL, NAU, TWP)\")\n else: raise Exception(\"invalide model: model = SAM, ICON, FV3, NICAM\")\n print(\"\\t returned temperature with shape\", t.shape)\n return t",
"def get_five_day_forecast(data):\n nextFiveDays = []\n\n forecasts = []\n chunks = data.split(\"\\n\\n\")\n chunks = [chunk.lstrip() for chunk in chunks] # remove any leading '\\n'\n for i, chunk in enumerate(chunks):\n if chunk.startswith(\"Forecast for \"):\n if not chunk.startswith(\"Forecast for the rest of \"):\n forecasts.append(i)\n\n FiveForecastsPresent = len(forecasts) > 5\n\n if FiveForecastsPresent:\n FiveForcasts = forecasts[:5]\n for index in FiveForcasts:\n\n forecast_line = chunks[index]\n day_name = forecast_line.split(\"\\n\")[0]\n day_name = day_name.replace(\"Forecast for \", \"\")\n day_name = day_name.strip()\n\n # The short form forecast details are typically in the\n # following chunk from the long forecast.\n chunk = chunks[index + 1]\n forecast_line = chunk.split(\"\\n\", 1)[0]\n\n items = forecast_line.split(\" \")\n items = filter(None, items) # remove empty items\n\n if len(items) == 3:\n # occasionally the precis and min temp are not separated\n # by a space. Eg. Sunny.Min 9\n _, precis_and_min, temperature_max = items\n precis, temperature_min = precis_and_min.rsplit(\".\", 1)\n else:\n _, precis, temperature_min, temperature_max = items\n\n precis = precis.strip()\n if precis.endswith(\".\"):\n precis = precis[:-1]\n\n temperature_min = temperature_min.replace(\"Min\", \"\")\n temperature_min = temperature_min.strip()\n\n temperature_max = temperature_max.replace(\"Max\", \"\")\n temperature_max = temperature_max.strip()\n\n nextFiveDays.append((day_name, temperature_min, temperature_max, precis))\n\n else:\n # try one of the other formats which looks like this:\n # Sunday Fine, partly cloudy. Min 12 Max 24\n # Monday A few showers. Min 13 Max 23\n # Tuesday A few showers. Min 14 Max 23\n # Wednesday A few showers. Min 13 Max 24\n # Thursday A few showers. Min 15 Max 25\n # Friday Showers.\n #\n # This block format seems to always follow the UV Alert block\n five_day_forecast_candidate_index = None\n for i, chunk in enumerate(chunks):\n # typically the chunk starts with UV Alert but sometimes it\n # can be bunched up with the chunk before.\n if \"UV Alert\" in chunk:\n five_day_forecast_candidate_index = i + 1\n break\n\n if five_day_forecast_candidate_index is not None:\n\n # sometimes there can be the second day's forecasts after the UV Alert\n # which is then followed by the five day forecast. Crazy!\n five_day_forecast = chunks[five_day_forecast_candidate_index]\n if five_day_forecast.startswith(\"Forecast for \"):\n # skip this and the next chunk\n five_day_forecast = chunks[five_day_forecast_candidate_index + 2]\n\n forecast_lines = five_day_forecast.split(\"\\n\")\n for forecast_line in forecast_lines:\n items = forecast_line.split(\" \")\n items = filter(None, items) # remove empty items\n day_name, precis, temperature_min, temperature_max = items\n\n day_name = day_name.strip()\n\n precis = precis.strip()\n if precis.endswith(\".\"):\n precis = precis[:-1]\n\n temperature_min = temperature_min.replace(\"Min\", \"\")\n temperature_min = temperature_min.strip()\n\n temperature_max = temperature_max.replace(\"Max\", \"\")\n temperature_max = temperature_max.strip()\n\n nextFiveDays.append((day_name, temperature_min, temperature_max, precis))\n\n return nextFiveDays",
"def get_olr_alb(model, region):\n if INCLUDE_SHOCK: \n ind0=0\n else:\n ind0 = 8*2 # exclude first two days\n if model.lower()==\"nicam\":\n if region.lower()==\"twp\":\n print(\"Getting olr and albedo for NICAM TWP:\")\n st= time.time()\n olr = xr.open_dataset(ap.TWP_NICAM_OLR)['sa_lwu_toa'][11::12,:,:,:]\n swu = xr.open_dataset(ap.TWP_NICAM_SWU)['ss_swu_toa'][11::12,:,:,:]\n swd = xr.open_dataset(ap.TWP_NICAM_SWD)['ss_swd_toa'][11::12,:,:,:]\n print(\"... calculating albedo for shape\",olr.shape,swu.shape,swd.shape)\n elif (region.lower()==\"nau\") or (region.lower()==\"nauru\"):\n print(\"Getting olr and albedo for NICAM NAURU:\")\n st= time.time()\n olr = xr.open_dataset(ap.NAU_NICAM_OLR)['sa_lwu_toa'][11::12,:,:,:]\n swu = xr.open_dataset(ap.NAU_NICAM_SWU)['ss_swu_toa'][11::12,:,:,:]\n swd = xr.open_dataset(ap.NAU_NICAM_SWD)['ss_swd_toa'][11::12,:,:,:]\n print(\"... calculating albedo for shape\",olr.shape,swu.shape,swd.shape)\n elif region.lower()==\"shl\":\n print(\"Getting olr and albedo for NICAM SAHEL:\")\n st= time.time()\n olr = xr.open_dataset(ap.SHL_NICAM_OLR)['sa_lwu_toa'][11::12,:,:,:]\n swu = xr.open_dataset(ap.SHL_NICAM_SWU)['ss_swu_toa'][11::12,:,:,:]\n swd = xr.open_dataset(ap.SHL_NICAM_SWD)['ss_swd_toa'][11::12,:,:,:]\n print(\"... calculating albedo for shape\",olr.shape,swu.shape,swd.shape)\n else: print(\"Region not supported (try TWP, NAU, SHL)\")\n alb = swu/swd\n alb = alb[ind0:]\n olr = olr[ind0:]\n del swu, swd\n print(\"... calculated albedo and opened olr (%s seconds elapsed)...\"%str(time.time()-st))\n elif model.lower()==\"fv3\":\n if region.lower()==\"twp\":\n print(\"Getting olr and albedo for FV3 TWP:\")\n olr = xr.open_dataset(ap.TWP_FV3_OLR)[\"flut\"][11::12,:,:]\n swu = xr.open_dataset(ap.TWP_FV3_SWU)[\"fsut\"][11::12,:,:]\n swu = swu[ind0:]\n swd = get_swd(\"FV3\", \"TWP\")[11::12,:,:]\n alb = swu.values/swd\n print(olr.shape, alb.shape)\n elif region.lower()==\"nau\":\n print(\"Getting olr and albedo for FV3 NAU:\")\n olr = xr.open_dataset(ap.NAU_FV3_OLR)[\"flut\"][11::12,:,:]\n swu = xr.open_dataset(ap.NAU_FV3_SWU)[\"fsut\"][11::12,:,:]\n swu = swu[ind0:]\n swd = get_swd(\"FV3\", \"NAU\")[11::12,:,:]\n alb = swu.values/swd\n print(olr.shape, alb.shape)\n elif region.lower()==\"shl\":\n print(\"Getting olr and albedo for FV3 SHL:\")\n olr = xr.open_dataset(ap.SHL_FV3_OLR)[\"flut\"][11::12,:,:]\n swu = xr.open_dataset(ap.SHL_FV3_SWU)[\"fsut\"][11::12,:,:]\n swu = swu[ind0:]\n swd = get_swd(\"FV3\", \"SHL\")[11::12,:,:]\n alb = swu.values/swd\n print(olr.shape, alb.shape)\n else: \n raise Exception(\"Region not supported. 
Try 'TWP', 'NAU', 'SHL'.\")\n alb = alb\n olr = olr[ind0:]\n elif model.lower()==\"icon\":\n if region.lower()==\"twp\":\n print(\"Getting olr and albedo for ICON TWP:\")\n olr = xr.open_dataset(ap.TWP_ICON_OLR)[\"ATHB_T\"]\n swu = xr.open_dataset(ap.TWP_ICON_SWU)[\"ASOU_T\"]\n swn = xr.open_dataset(ap.TWP_ICON_SWN)[\"ASOB_T\"]\n swd = swn + swu.values\n del swn\n alb = swu/swd.values\n alb = alb.where((alb.values>0)&(swd.values>0)&(alb.values<1))\n elif region.lower()==\"nau\":\n print(\"Getting olr and albedo for ICON NAU:\")\n rad = xr.open_dataset(ap.NAU_ICON_RAD)\n olr = reshape.reshape(\"ATHB_T\", rad, dim=2)\n swu = reshape.reshape(\"ASOU_T\", rad, dim=2)\n swn = reshape.reshape(\"ASOB_T\", rad, dim=2)\n del rad\n olr_un = util.undomean(olr, xy=False)\n swu_un = util.undomean(swu, xy=False)\n del swu\n swn_un = util.undomean(swn, xy=False)\n del swn\n olr = xr.DataArray(olr_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.t.values,\\\n \"cell\":olr.cell})\n swu = xr.DataArray(swu_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.time.values,\\\n \"cell\":olr.cell})\n swn = xr.DataArray(swn_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.time.values,\\\n \"cell\":olr.cell})\n swd = swn + swu\n del swn\n alb = swu/swd\n elif region.lower()==\"shl\":\n print(\"Getting olr and albedo for ICON SHL:\")\n rad = xr.open_dataset(ap.SHL_ICON_RAD)\n olr = reshape.reshape(\"ATHB_T\", rad, dim=2)\n swu = reshape.reshape(\"ASOU_T\", rad, dim=2)\n swn = reshape.reshape(\"ASOB_T\", rad, dim=2)\n olr_un = util.undomean(olr, xy=False)\n swu_un = util.undomean(swu, xy=False)\n swn_un = util.undomean(swn, xy=False)\n olr = xr.DataArray(olr_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.t.values,\\\n \"cell\":olr.cell})\n swu = xr.DataArray(swu_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.time.values,\\\n \"cell\":olr.cell})\n swn = xr.DataArray(swn_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.time.values,\\\n \"cell\":olr.cell})\n swd = swn + swu\n del swn\n alb = swu/swd\n else: \n raise Exception(\"Region not supported. Try 'TWP', 'NAU', 'SHL'.\")\n alb = alb.where((alb<1)&(alb>0))\n alb = alb[11::12]\n olr = olr[11::12]\n alb = alb[ind0:]\n olr = olr[ind0:]\n print(olr.shape, alb.shape)\n elif model.lower()==\"sam\":\n if region.lower()==\"twp\":\n print(\"Getting olr and albedo for SAM TWP:\")\n olr = xr.open_dataset(ap.TWP_SAM_OLR)[\"LWNTA\"][5::6,:,:]\n swn = xr.open_dataset(ap.TWP_SAM_SWN)[\"SWNTA\"][5::6,:,:]\n olr = olr[ind0:]\n swn = swn[ind0:]\n swd = get_swd(\"SAM\", \"TWP\")[5::6,:,:]\n elif region.lower()==\"nau\":\n print(\"Getting olr and albedo for SAM NAU:\")\n olr = xr.open_dataset(ap.NAU_SAM_OLR)[\"LWNTA\"][5::6,:,:]\n swn = xr.open_dataset(ap.NAU_SAM_SWN)[\"SWNTA\"][5::6,:,:]\n olr = olr[ind0:]\n swn = swn[ind0:]\n swd = get_swd(\"SAM\", \"NAU\")[5::6,:,:]\n elif region.lower()==\"shl\":\n print(\"Getting olr and albedo for SAM SHL:\")\n olr = xr.open_dataset(ap.SHL_SAM_OLR)[\"LWNTA\"][5::6,:,:]\n swn = xr.open_dataset(ap.SHL_SAM_SWN)[\"SWNTA\"][5::6,:,:]\n olr = olr[ind0:]\n swn = swn[ind0:]\n swd = get_swd(\"SAM\", \"SHL\")[5::6,:,:]\n else: \n raise Exception(\"Region not supported. Try 'TWP', 'NAU', 'SHL'.\")\n print(swd.shape, swn.shape)\n swu = swd.values - swn.values\n print(\"... subtracted...\")\n alb = swu/swd.values\n print(\"... calculated alb...\")\n alb = xr.DataArray(alb, dims=olr.dims, coords=olr.coords, attrs={'long_name':'albedo at TOA (aver)',\n 'units':'None'})\n print(\"... 
made xarray...\")\n alb = alb.where((alb.values>0)&(swd.values>0))\n print(\"... made sure alb values are valid...\")\n print(\"... calculated mean\", alb.mean().values, \"...\")\n print(\"... returning olr and albedo\", olr.shape, alb.shape, \"...\")\n else: raise Exception(\"Model not supported at this time (try 'NICAM', 'FV3', 'ICON', 'SAM')\")\n return olr, alb",
"def extract(request):\n extract_type = request['extract_type']\n if extract_type in ['point', 'area_average', 'domain']:\n if extract_type == 'point':\n if 'longitude' not in request:\n print('Warning! \"longitude\" not supplied.')\n return\n \n if 'latitude' not in request:\n print('Warning! \"latitude\" not supplied.')\n return\n \n lon = np.float(request['longitude'])\n lat = np.float(request['latitude'])\n \n # Check if lon/lat values are valid\n if check_lonlat(lon, lat):\n pass\n else:\n return\n \n print('Extracting point TAMSAT rainfall estimates for longitude: %s and latitude: %s' % (lon, lat))\n \n elif (extract_type == 'area_average') or (extract_type == 'domain'):\n if 'N' not in request:\n print('Warning! \"N\" not supplied.')\n return\n \n if 'S' not in request:\n print('Warning! \"S\" not supplied.')\n return\n \n if 'W' not in request:\n print('Warning! \"W\" not supplied.')\n return\n \n if 'E' not in request:\n print('Warning! \"E\" not supplied.')\n return\n \n N = np.float(request['N'])\n S = np.float(request['S'])\n W = np.float(request['W'])\n E = np.float(request['E'])\n\n # Check if N/S/W/E values are valid\n if check_domain(N, S, W, E):\n pass\n else:\n return\n \n print('Extracting %s TAMSAT rainfall estimates for N: %s, S: %s, W: %s and E: %s' % (extract_type, str(N), str(S), str(W), str(E)))\n \n timestep = request['timestep']\n if 'resolution' not in request:\n resolution = 0.25\n else:\n resolution = request['resolution']\n \n # Check if timestep and resolution values are valid\n if check_input_values(timestep, resolution):\n pass\n else:\n return\n \n startdate = request['start_date']\n enddate = request['end_date']\n \n # Check if start and end dates are valid\n if check_dates(startdate, enddate):\n pass\n else:\n return\n \n version = request['version']\n \n # Check if version is valid\n allowed_versions = [3.1]\n if np.float(version) in allowed_versions:\n pass\n else:\n print('\"version\" not recognised. Current version(s) available: 3.1')\n return\n \n localdata_dir = request['localdata_dir']\n \n # List expected files\n daterange = determine_daterange(startdate, enddate)\n flist_expect = get_filenames(localdata_dir + '/tamsat/rfe', daterange, timestep, resolution, version)\n \n # List files that exist\n flist_exist = [f for f in flist_expect if os.path.exists(f)]\n if len(flist_exist) > 0:\n if len(flist_exist) != len(flist_expect):\n print('Warning! Not all files within date range found: %s expected, %s found.' 
% (len(flist_expect), len(flist_exist)))\n \n # Extract\n if len(flist_exist) > 0:\n ds_list = []\n for file in flist_exist:\n ds = xr.open_dataset(file)\n if extract_type == 'point':\n ds_list.append(ds.sel(lon=lon, lat=lat, method='nearest'))\n elif (extract_type == 'area_average') or (extract_type == 'domain'):\n if str(resolution) == '0.0375':\n ds_list.append(ds.sel(lon=slice(W, E), lat=slice(N, S)))\n else:\n ds_list.append(ds.sel(lon=slice(W, E), lat=slice(S, N)))\n \n ds.close()\n \n if len(ds_list) > 0:\n if extract_type == 'point':\n ds = xr.concat(ds_list, dim='time')\n elif extract_type == 'area_average':\n ds = xr.concat(ds_list, dim='time').mean(dim=['lon', 'lat'], skipna=True)\n elif extract_type == 'domain':\n ds = xr.concat(ds_list, dim='time')\n \n if extract_type == 'point':\n df = ds.to_dataframe().round(4)\n fname = 'TAMSATv' + str(version) + '_' + timestep + '_' + str(resolution) + '_' + str(lon) + '_' + str(lat) + '_' + startdate + '_' + enddate + '.csv'\n fname_full = localdata_dir + '/extracted_data/' + extract_type + '/' + fname\n if not os.path.exists(os.path.dirname(fname_full)):\n os.makedirs(os.path.dirname(fname_full))\n \n df.to_csv(fname_full, index=True, header=True)\n \n elif extract_type == 'area_average':\n df = ds.to_dataframe().round(4)\n fname = 'TAMSATv' + str(version) + '_' + timestep + '_' + str(resolution) + '_' + str(N) + '_' + str(S) + '_' + str(W) + '_' + str(E) + '_' + startdate + '_' + enddate + '.csv'\n fname_full = localdata_dir + '/extracted_data/' + extract_type + '/' + fname\n if not os.path.exists(os.path.dirname(fname_full)):\n os.makedirs(os.path.dirname(fname_full))\n \n df.to_csv(fname_full, index=True, header=True)\n \n elif extract_type == 'domain':\n fname = 'TAMSATv' + str(version) + '_' + timestep + '_' + str(resolution) + '_' + str(N) + '_' + str(S) + '_' + str(W) + '_' + str(E) + '_' + startdate + '_' + enddate + '.nc'\n fname_full = localdata_dir + '/extracted_data/' + extract_type + '/' + fname\n if not os.path.exists(os.path.dirname(fname_full)):\n os.makedirs(os.path.dirname(fname_full))\n \n ds.to_netcdf(fname_full)\n \n if os.path.exists(fname_full):\n print('Created file: %s' % fname_full)\n else:\n print('Warning! Unable to create file: %s' % fname_full)\n else:\n print('No files found, please check input parameters or that TAMSAT data exists for given parameters.')\n print('By default, 0.25 degree resolution data are used for extraction unless \"resolution\" argument is supplied.')\n else:\n print('Warning! \"extract_type\" not recognised. Excepted values are: \"point\", \"area_average\" or \"domain\".')",
"def por_r_herm(data):\n tdata = dc(data)\n\n m_e = tdata.get('m_e', np.array(2.))\n\n return por_r_harm(tdata)**(1./m_e)",
"def read_all_rh_obs(day, site_rh, rhDatadir, mod_data):\n\n # define array\n rh_obs = {}\n\n # get date string for obs of the main and following days\n doyStr = day.strftime('%Y%j')\n # doyStr2 = (day + dt.timedelta(hours=24)).strftime('%Y%j')\n\n for site, height in site_rh.iteritems():\n\n rh_obs[site] = {}\n\n # rh_fnames = [rhDatadir + site + '_' + doyStr + '_1min.nc',\n # rhDatadir + site + '_' + doyStr2 + '_1min.nc']\n\n rh_fnames = rhDatadir + site + '_' + doyStr + '_1min.nc'\n\n # read in all data\n data_obs = eu.netCDF_read(rh_fnames, vars=['RH', 'time'])\n data_obs['height'] = height\n\n # find nearest time in rh time\n # pull out ALL the nearest time idxs and differences\n t_idx = np.array([eu.nearest(data_obs['time'], t)[1] for t in mod_data[mod_data.keys()[0]]['time']])\n t_diff = np.array([eu.nearest(data_obs['time'], t)[2] for t in mod_data[mod_data.keys()[0]]['time']])\n\n # extract hours\n rh_obs[site]['RH'] = data_obs['RH'][t_idx]\n rh_obs[site]['height'] = data_obs['height']\n rh_obs[site]['time'] = [data_obs['time'][i] for i in t_idx]\n\n # overwrite t_idx locations where t_diff is too high with nans\n # only keep t_idx values where the difference is below 5 minutes\n bad = np.array([abs(i.days * 86400 + i.seconds) > 10 * 60 for i in t_diff])\n rh_obs[site]['RH'][bad] = np.nan\n\n # change flags to nans\n rh_obs[site]['RH'][np.where(rh_obs[site]['RH'] < 0)] = np.nan\n\n return rh_obs",
"def update_energy_cal():\n\n hit_conn = Connection('landau.hit')\n\n log_book = get_ts_logbook()\n energy_measured = log_book.Energy[log_book.Fuel == 'ETEST']\n energy_integrated = pd.Series()\n for shot in log_book.Shot[log_book.Fuel == 'ETEST']:\n hit_conn.openTree(\"hitsi3\", shot)\n try:\n flux_photodiode = np.array(hit_conn.get(\"\\\\TS_RUBY\"))\n flux_photodiode_t = np.array(hit_conn.get(\"DIM_OF(\\\\TS_RUBY)\"))\n except EOFError:\n print(\"WARNING: Error reading photodiode data from shot\", shot)\n # return -1\n pass\n\n flux_baseline = np.mean(flux_photodiode[0:np.int(np.around(np.size(flux_photodiode, 0)*photodiode_baseline_record_fraction))])\n flux_photodiode = flux_photodiode - flux_baseline\n\n energy_integrated = energy_integrated.append(pd.Series([np.trapz(flux_photodiode, flux_photodiode_t)]), ignore_index=True)\n\n\n\n # A = np.transpose(np.array([energy_measured, (np.ones_like(energy_measured))]))\n # m, c = np.linalg.lstsq(A, energy_integrated,rcond=None)[0]\n energy_integrated = energy_integrated.to_numpy().reshape(-1, 1)\n energy_measured = energy_measured.to_numpy().reshape(-1, 1)\n\n # Model initialization\n regression_model = LinearRegression()\n\n # Fit the data\n regression_model.fit(energy_measured, energy_integrated)\n\n # Predict\n energy_predicted = regression_model.predict(energy_measured)\n\n # model evaluation\n rmse = mean_squared_error(energy_integrated, energy_predicted)\n r2 = r2_score(energy_integrated, energy_predicted)\n m = regression_model.coef_[0][0]\n b = regression_model.intercept_[0]\n\n if PLOTS_ON == 1:\n # printing values\n print('Slope:', m)\n print('Intercept:', b)\n print('Root mean squared error: ', rmse)\n print('R2 score: ', r2)\n\n fig1, ax1 = plt.subplots()\n ax1.set_title(\"Linear regression\")\n ax1.set_xlabel(r\"$E_{meter} [J]$\")\n ax1.set_ylabel(r\"$E_{photodiode} [J]$\")\n ax1.plot(energy_measured, energy_integrated, 'o', label='Original data', markersize=2)\n ax1.plot(np.arange(0, 10), regression_model.predict(np.arange(0, 10).reshape(-1, 1)), label='Fitted line')\n # ax1.plot(np.arange(0, 10), np.arange(0, 10), color='k', ls='--', linewidth=0.5)\n ax1.legend()\n ax1.grid(ls='--')\n\n tree_write_safe(m, 'LASER_E_SLOPE')\n tree_write_safe(b, 'LASER_E_INT')\n\n with pm.Model() as linear_model:\n # Intercept\n intercept = pm.Normal('intercept', mu=0, sd=5)\n # intercept = pm.Uniform('intercept',lower=0, upper=1)\n\n # Slope\n # slope = pm.Normal('slope', mu=0, sd=10)\n slope = pm.Uniform('slope',lower=0, upper=1)\n\n # Standard deviation\n sigma = pm.HalfNormal('sigma', sd=10)\n\n # Estimate of mean\n mean = intercept + slope*energy_measured\n\n # Observed values\n Y_obs = pm.Normal('Y_obs', mu=mean, sd=sigma, observed=energy_integrated)\n\n # Sampler\n step = pm.NUTS(target_accept=0.95)\n\n # Posterior distribution\n linear_trace = pm.sample(2000, step, tune=4000)\n # linear_trace = pm.sample(1000, step, tune=2000)\n pm.summary(linear_trace)\n\n if PLOTS_ON == 1:\n pm.traceplot(linear_trace, figsize=(12, 12))\n pm.plot_posterior(linear_trace, figsize=(12, 10), text_size=20, credible_interval=0.95, round_to=12)\n # pm.forestplot(linear_trace)\n\n plt.figure(figsize=(8, 8))\n pm.plot_posterior_predictive_glm(linear_trace, samples=100, eval=np.linspace(0, 10, 100), linewidth=1,\n color='red', alpha=0.05, label='Bayesian Posterior Fits',\n lm=lambda x, sample: sample['intercept'] + sample['slope'] * x)\n plt.scatter(energy_measured[:500], energy_integrated[:500], s=12, alpha=0.8, c='blue', label='Observations')\n\n # 
bayes_prediction = (1e-07 - linear_trace['Intercept'])/linear_trace['slope']\n # plt.figure(figsize = (8, 8))\n # sns.kdeplot(bayes_prediction, label = 'Bayes Posterior Prediction')\n # plt.vlines(x = (1e-07 - c)/m,\n # ymin = 0, ymax = 2.5,\n # label = 'OLS Prediction',\n # colors = 'red', linestyles='--')\n print(pm.summary(linear_trace))\n\n tree_write_safe(linear_trace['slope'], 'LASER_E_SLOPE_B')\n tree_write_safe(linear_trace['intercept'], 'LASER_E_INT_B')",
"def process_era5_weather(path_nc, longs, lats):\n wisp = load_dataset(path_nc, 'wind')\n widi = load_dataset(path_nc, 'dwi')\n wh = load_dataset(path_nc, 'swh')\n wd = load_dataset(path_nc, 'mdts')\n wp = load_dataset(path_nc, 'mpts')\n rg_wisp = regrid_data(wisp, longs[:, 0], lats[0, :])\n rg_widi = regrid_data(widi, longs[:, 0], lats[0, :])\n rg_wh = regrid_data(wh, longs[:, 0], lats[0, :])\n rg_wd = regrid_data(wd, longs[:, 0], lats[0, :])\n rg_wp = regrid_data(wp, longs[:, 0], lats[0, :])\n wisp = None\n widi = None\n wh = None\n wd = None\n wp = None\n return rg_wisp, rg_widi, rg_wh, rg_wd, rg_wp",
"def calculate_error_metrics(model_name):\n sim_data_fldr = \"simulation_data\"\n reward_data_fldr = \"reward_data\"\n\n ee_pos_path = os.path.join(sim_data_fldr, \"ee_pos_\" + model_name + \".csv\")\n ee_goal_pos_path = os.path.join(sim_data_fldr, \"ee_goal_pos_\" + model_name + \".csv\")\n\n ee_z_force_path = os.path.join(sim_data_fldr, \"ee_z_contact_force_\" + model_name + \".csv\")\n ee_mean_z_force_path = os.path.join(sim_data_fldr, \"ee_z_running_mean_contact_force_\" + model_name + \".csv\")\n ee_goal_z_force_path = os.path.join(sim_data_fldr, \"ee_z_goal_contact_force_\" + model_name + \".csv\")\n\n ee_z_derivative_force_path = os.path.join(sim_data_fldr, \"ee_z_derivative_contact_force_\" + model_name + \".csv\")\n ee_goal_derivative_z_force_path = os.path.join(sim_data_fldr, \"ee_z_goal_derivative_contact_force_\" + model_name + \".csv\")\n\n ee_vel_path = os.path.join(sim_data_fldr, \"ee_vel_\" + model_name + \".csv\")\n ee_mean_vel_path = os.path.join(sim_data_fldr, \"ee_running_mean_vel_\" + model_name + \".csv\")\n ee_goal_vel_path = os.path.join(sim_data_fldr, \"ee_goal_vel_\" + model_name + \".csv\")\n\n ee_diff_quat_path = os.path.join(sim_data_fldr, \"ee_diff_quat_\" + model_name + \".csv\")\n\n pos_reward_path = os.path.join(reward_data_fldr, \"pos_\" + model_name + \".csv\")\n ori_reward_path = os.path.join(reward_data_fldr, \"ori_\" + model_name + \".csv\")\n force_reward_path = os.path.join(reward_data_fldr, \"force_\" + model_name + \".csv\")\n der_reward_path = os.path.join(reward_data_fldr, \"derivative_force_\" + model_name + \".csv\")\n vel_reward_path = os.path.join(reward_data_fldr, \"vel_\" + model_name + \".csv\")\n\n mse_ee_pos(ee_pos_path, ee_goal_pos_path, model_name)\n mse_ee_force(ee_z_force_path, ee_mean_z_force_path, ee_goal_z_force_path, model_name)\n mse_ee_der_force(ee_z_derivative_force_path, ee_goal_derivative_z_force_path, model_name)\n mse_ee_velocity(ee_vel_path, ee_mean_vel_path, ee_goal_vel_path, model_name)\n mean_ee_quat_diff(ee_diff_quat_path, model_name)\n mean_rewards(\n pos_reward_path,\n ori_reward_path,\n force_reward_path,\n der_reward_path,\n vel_reward_path,\n model_name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
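A minimal sketch, assuming each row is a Python dict keyed by the query, document and negatives fields named in the objective block above: it shows one way such a row could be expanded into (anchor, positive, negative) training triplets. The row variable and the encode() / triplet_loss() calls in the usage comment are hypothetical placeholders, not an API defined by this dataset.

def row_to_triplets(row):
    # row is assumed to be a dict with the field names listed in the metadata above
    query = row["query"]              # natural-language description (anchor)
    positive = row["document"]        # the matching code snippet
    for negative in row["negatives"]: # non-matching code snippets
        yield (query, positive, negative)

# hypothetical usage with an encoder and a triplet loss:
# for anchor, pos, neg in row_to_triplets(row):
#     loss = triplet_loss(encode(anchor), encode(pos), encode(neg))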
|
Read in the day and following day's data, for all rh obs.
|
def read_all_rh_obs(day, site_rh, rhDatadir, mod_data):
    # define output dictionary
rh_obs = {}
# get date string for obs of the main and following days
doyStr = day.strftime('%Y%j')
# doyStr2 = (day + dt.timedelta(hours=24)).strftime('%Y%j')
for site, height in site_rh.iteritems():
rh_obs[site] = {}
# rh_fnames = [rhDatadir + site + '_' + doyStr + '_1min.nc',
# rhDatadir + site + '_' + doyStr2 + '_1min.nc']
rh_fnames = rhDatadir + site + '_' + doyStr + '_1min.nc'
# read in all data
data_obs = eu.netCDF_read(rh_fnames, vars=['RH', 'time'])
data_obs['height'] = height
# find nearest time in rh time
# pull out ALL the nearest time idxs and differences
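    # (eu.nearest is assumed here to return a (value, index, time difference) tuple; indices [1] and [2] are used below)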
t_idx = np.array([eu.nearest(data_obs['time'], t)[1] for t in mod_data[mod_data.keys()[0]]['time']])
t_diff = np.array([eu.nearest(data_obs['time'], t)[2] for t in mod_data[mod_data.keys()[0]]['time']])
# extract hours
rh_obs[site]['RH'] = data_obs['RH'][t_idx]
rh_obs[site]['height'] = data_obs['height']
rh_obs[site]['time'] = [data_obs['time'][i] for i in t_idx]
# overwrite t_idx locations where t_diff is too high with nans
        # only keep t_idx values where the difference is below 10 minutes
bad = np.array([abs(i.days * 86400 + i.seconds) > 10 * 60 for i in t_diff])
rh_obs[site]['RH'][bad] = np.nan
# change flags to nans
rh_obs[site]['RH'][np.where(rh_obs[site]['RH'] < 0)] = np.nan
return rh_obs
|
[
"def read_wxt_obs(day, time, z):\n\n filepath = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \\\n 'Davis_BGH_' + day.strftime('%Y') + '_15min.nc'\n wxt_obs = eu.netCDF_read(filepath, vars=['time', 'RH', 'Tair', 'press'])\n\n # extract out RH obs to match mod_time\n # pull out ALL the nearest time idxs and differences\n # the mod_data time is the same for all sites so can therefore use any site\n t_idx = np.array([eu.nearest(wxt_obs['time'], t)[1] for t in time])\n t_diff = np.array([eu.nearest(wxt_obs['time'], t)[2] for t in time])\n\n wxt_obs['RH'] = wxt_obs['RH'][t_idx] # [%]\n wxt_obs['Tair'] = wxt_obs['Tair'][t_idx] # [degC]\n wxt_obs['press'] = wxt_obs['press'][t_idx] # [hPa]\n wxt_obs['time'] = wxt_obs['time'][t_idx]\n # wxt_obs['rawtime'] = wxt_obs['rawtime'][t_idx]\n\n # overwrite t_idx locations where t_diff is too high with nans\n # only keep t_idx values where the difference is below 1 hour\n bad = np.array([abs(i.days * 86400 + i.seconds) > 60 * 60 for i in t_diff])\n\n wxt_obs['RH'][bad] = np.nan\n wxt_obs['Tair'][bad] = np.nan\n wxt_obs['press'][bad] = np.nan\n\n wxt_obs['time'][bad] = np.nan\n # wxt_obs['rawtime'][bad] = np.nan\n\n # create RH_frac using RH data\n wxt_obs['RH_frac'] = wxt_obs['RH'] / 100.0\n\n # calculate extra variables\n e_s_hpa = 6.112 * (np.exp((17.67 * wxt_obs['Tair']) / (wxt_obs['Tair'] + 243.5))) # [hPa] # sat. v. pressure\n e_s = e_s_hpa * 100.0 # [Pa] # sat. v. pressure\n wxt_obs['e'] = wxt_obs['RH_frac'] * e_s # [Pa] # v. pressure\n wxt_obs['r_v'] = wxt_obs['e'] / (1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) # water_vapour mixing ratio [kg kg-1]\n wxt_obs['q'] = wxt_obs['e'] / ((1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) + wxt_obs['e']) # specific humidity [kg kg-1]\n wxt_obs['Tv'] = (1 + (0.61 * wxt_obs['q'])) * (wxt_obs['Tair'] + 273.15) # virtual temp [K]\n wxt_obs['air_density'] = (wxt_obs['press']*100.0) / (286.9 * wxt_obs['Tv'])# [kg m-3]\n\n # extend the wxt obs in height to match the dimensions of model RH\n # copy the obs so it is the same at all heights\n for var, item in wxt_obs.iteritems():\n if var not in ['time', 'rawtime']:\n # wxt_obs[var] = np.transpose(np.tile(item, (int(rh_frac.shape[1]), 1)))\n wxt_obs[var] = np.transpose(np.tile(item, (int(z.shape[-1]), 1)))\n\n return wxt_obs",
"def read_f_RH(mod_time, ceil_lam):\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])\n return f_RH",
"def read_hourly_f_RH(mod_time, ceil_lam):\n\n import sys.platform as platform\n\n # file name and path\n if platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename)\n return f_RH",
"def _read_dwd(date, timezone, longitude, latitude, path):\n \n # initialize variables \n dwdpath = os.path.join(os.path.join(path, \"dwd\"))\n fields = [\"aswdifd_s\", \"aswdir_s\", \"t_2m\", \"t_g\"]\n \n lastForecast = None\n for f in range(len(fields)):\n # get date of latest forecast\n dirList = os.listdir(os.path.join(dwdpath, fields[f]))\n dirList.sort(reverse = True)\n if dirList[0].rsplit(\"_\", 2)[0] == 120:\n lastForecast = dirList[0].rsplit(\"_\", 2)[1]\n \n if lastForecast != None:\n # unpack compressed, latest forecast\n os.system(\"bunzip2 --keep `find \" + dwdpath + \" -name '*\" + lastForecast + \"*.bz2'`\")\n \n dates = []\n data = []\n for f in range(len(fields)):\n # list all extracted grib files\n dirList = glob.glob(os.path.join(dwdpath, fields[f], \"*\" + lastForecast + \"*.grib2\"))\n dirList.sort()\n \n lastValue = 0\n data.append([])\n \n if len(dirList) >= 48:\n for i in range(24):\n grb = pygrib.open(dirList[i])\n grb.seek(0)\n \n lat, lon = grb.latlons()\n i, j = _get_location_nearest(lat, lon, latitude, longitude)\n \n lastTimestamp = False\n firstTimestamp = False\n for g in grb:\n timestamp = datetime.datetime.strptime(str(g['validityDate']) + \" \" + '%0.0f'%(g['validityTime']/100.0), \"%Y%m%d %H\")\n \n if lastTimestamp:\n if f == 0:\n datestr = datetime.datetime.strftime(lastTimestamp, \"%Y-%m-%d %H\")\n dates.append(datestr)\n \n if fields[f] == \"aswdifd_s\" or fields[f] == \"aswdir_s\":\n diff = (timestamp - lastTimestamp).total_seconds() / 3600.0\n value = (1 / diff) * ((timestamp - firstTimestamp).total_seconds() / 3600 * g['values'][i, j] - (lastTimestamp - firstTimestamp).total_seconds() / 3600 * lastValue)\n else:\n value = g['values'][i, j]\n \n data[f].append(value)\n \n else:\n firstTimestamp = timestamp\n \n lastTimestamp = timestamp\n lastValue = g['values'][i, j]\n \n grb.close()\n \n if len(dates) > 0:\n csvpath = os.path.join(os.path.join(path, \"csv\"))\n with open(os.path.join(csvpath, \"DWD_\" + lastForecast + \".csv\"), 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter = \",\")\n line = [\"time\"]\n line.extend(fields)\n writer.writerow(line)\n for i in range(len(dates)):\n line = [dates[i] + \":00:00\"]\n for j in range(len(fields)):\n line.append(data[j][i])\n writer.writerow(line)\n \n # clean up\n os.system(\"find \" + dwdpath + \" -name '*\" + lastForecast + \"*.grib2' -exec rm -f {} \\;\")\n \n return None;",
"def readMETEO(filename, headonly=False, **kwargs):\n\n starttime = kwargs.get('starttime')\n endtime = kwargs.get('endtime')\n takehelium = kwargs.get('takehelium')\n debug = kwargs.get('debug')\n getfile = True\n\n heliumcols = []\n\n stream = DataStream()\n\n if debug:\n print (\"METEO: found RCS meteo data\")\n\n # Check whether header infromation is already present\n headers = {}\n\n theday = extractDateFromString(filename)\n\n try:\n if starttime:\n if not theday[-1] >= datetime.date(stream._testtime(starttime)):\n getfile = False\n if endtime:\n if not theday[0] <= datetime.date(stream._testtime(endtime)):\n getfile = False\n except:\n print(\"Did not recognize the date format\")\n # Date format not recognized. Need to read all files\n getfile = True\n\n fh = open(filename, 'rb')\n\n array = [[] for key in KEYLIST]\n fkeys = []\n felements = []\n\n if getfile:\n for line in fh:\n line = line.decode('utf-8',errors='ignore')\n if line.isspace():\n # blank line\n continue\n elif line.startswith(' '):\n continue\n elif line.startswith('Date'):\n # Read the header information\n #1) first get number of columns\n cols = line.split()\n if not takehelium:\n try:\n columns = [elem for elem in cols if not elem.startswith('He')]\n except:\n print(\"Found error in header\", filename)\n columns = []\n else:\n columns = cols\n for i, elem in enumerate(columns):\n if i > 1:\n key = KEYLIST[i-1]\n fkeys.append(key)\n headers['col-'+key] = elem.replace('_','')\n headers['unit-col-'+key] = '-'\n\n else:\n colsstr = line.split()\n if not takehelium:\n try:\n colsstr = [elem for i, elem in enumerate(colsstr) if not cols[i].startswith('He')]\n except:\n print(\"Found error in data sequence\", filename)\n #print colsstr\n break\n row = LineStruct()\n try:\n date = colsstr[0]+'-'+colsstr[1]\n array[0].append(date2num(datetime.strptime(date,\"%Y%m%d-%H%M%S\")))\n #row.time = date2num(datetime.strptime(date,\"%Y%m%d-%H%M%S\"))\n for i in range(2,len(colsstr)):\n key = KEYLIST[i-1]\n if not key.startswith('str') and not key in ['flag','comment','typ']:\n array[i-1].append(float(colsstr[i]))\n #exec('row.'+key+' = float(colsstr[i])')\n elif not key in ['flag','comment','typ']:\n array[i-1].append(str(float(colsstr[i])))\n #exec('row.'+key+' = str(float(colsstr[i]))')\n #row.typ = 'other'\n #stream.add(row)\n except:\n pass\n\n for idx,el in enumerate(array):\n array[idx] = np.asarray(el)\n\n headers['SensorDescription'] = 'RCS: filtered Meteorlogical data - Andreas Winkelbauer'\n headers['SensorName'] = 'Various Meteorology sensors'\n headers['SensorID'] = 'METEO_RCS2015_0001'\n headers['SensorType'] = 'Various'\n headers['SensorModule'] = 'RCS'\n headers['SensorDataLogger'] = 'F77'\n headers['SensorGroup'] = 'environment'\n headers['DataFormat'] = 'RCSMETEO v3.0'\n headers['col-t2'] = '430UEV' # Necessary because of none UTF8 coding in header\n headers['col-f'] = 'T'\n headers['unit-col-f'] = 'deg C'\n headers['col-z'] = 'Schneehoehe'\n headers['unit-col-z'] = 'cm'\n if not takehelium:\n headers['col-t1'] = 'rh'\n headers['unit-col-t1'] = 'percent'\n headers['col-var5'] = 'P'\n headers['unit-col-var5'] = 'hPa'\n headers['col-var1'] = 'Wind'\n headers['unit-col-var1'] = 'm/s'\n\n headers['SensorKeys'] = ','.join(fkeys)\n headers['SensorElements'] = ','.join([headers['col-'+key] for key in KEYLIST if key in fkeys])\n\n if debug:\n print (\"METEO: Successfully loaded METEO data\")\n return DataStream([LineStruct()], headers, np.asarray(array,dtype=object))",
"def get_data(day_num: int) -> Generator[str, None, None]:\n data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n '..', 'data')\n with open(os.path.join(data_dir, f'day_{day_num}.txt'), 'r') as fobj:\n yield from fobj",
"def read_obs_hmv_declination(obscode, year_st, year_fn, folder):\n\n OBSY = obscode.upper()\n obsy = obscode.lower()\n # Read in the observatory data one year file at a time and construct filenames\n datareq = pd.DataFrame()\n for year in range(year_st, year_fn+1):\n ystr = str(year)\n file = obsy + ystr + 'dhor.hor'\n fpf = folder + '/' + file\n tmp = IAGA2002_Data_Reader(fpf)\n tmp.columns = [col.strip(OBSY) for col in tmp.columns]\n tmp = tmp.replace(99999.00, np.nan)\n # Calculate D (in degrees) if not given in the file\n if('D' not in tmp.columns):\n dvals, hvals, ivalsm, fvals = xyz2dhif(tmp['X'], tmp['Y'], tmp['Z'])\n tmp.insert(loc=1, column='D', value=dvals.values)\n else:\n # Convert the reported values to degrees\n tmp['D'] = tmp.D.values/60.0\n datareq = datareq.append(tmp[['D']])\n return(datareq)",
"def read_focal_temp(tyear, yday, tstart, tstop):\n#\n#--- if y daay is less than 8, read the data from the last year\n#\n if yday < 8:\n ifile = '/data/mta/Script/ACIS/Focal/Data/focal_plane_data_5min_avg_' + str(tyear-1)\n data = read_data_file(ifile, sep='\\s+', c_len=2)\n ftime = data[0]\n focal = data[1]\n else:\n ftime = []\n focal = []\n#\n#--- otherwise, just read this year\n#\n ifile = '/data/mta/Script/ACIS/Focal/Data/focal_plane_data_5min_avg_' + str(tyear)\n data = read_data_file(ifile, sep='\\s+', c_len=2)\n ftime = ftime + data[0]\n focal = focal + data[1]\n#\n#--- select out the data for the last 7 days\n#\n [ftime, focal] = select_data_by_date(ftime, focal, tstart, tstop)\n\n return [ftime, focal]",
"def cfht_weather_data(year, month, day, hour, minute,\n dir='/u/ghezgroup/code/python/keckdar/'):\n\n temperature = np.zeros(len(year), dtype=float)\n pressure = np.zeros(len(year), dtype=float)\n humidity = np.zeros(len(year), dtype=float)\n wind_speed = np.zeros(len(year), dtype=float)\n wind_dir = np.zeros(len(year), dtype=float)\n\n\n cfht_file = None\n\n for ii in range(len(year)):\n cfht_file_new = dir + 'cfht-wx.' + str(year[ii]) + '.' + \\\n str(month[ii]).zfill(2) + '.dat'\n\n if (cfht_file != cfht_file_new):\n cfht_file = cfht_file_new\n cfht = asciidata.open(cfht_file)\n\n atmYear = cfht[0].tonumpy()\n atmMonth = cfht[1].tonumpy()\n atmDay = cfht[2].tonumpy()\n atmHour = cfht[3].tonumpy()\n atmMin = cfht[4].tonumpy() # HST times\n atmWindSpeed = cfht[5].tonumpy() # km/h\n atmWindDir = cfht[6].tonumpy() # degrees\n atmTemp = cfht[7].tonumpy() # Celsius\n atmHumidity = cfht[8].tonumpy() # percent\n atmPressure = cfht[9].tonumpy() # mb pressure\n\n\n # Find the exact time match for year, month, day, hour\n idx = (np.where((atmDay == day[ii]) & (atmHour == hour[ii])))[0]\n \n if (len(idx) == 0):\n print 'Could not find DAR data for %4d-%2d-%2d %2d:%2d in %s' % \\\n (year, month, day, hour, minute, logFile)\n\n # Find the closest minute\n mdx = abs(atmMin[idx] - minute[ii]).argmin()\n match = idx[ mdx ]\n\n # Ambient Temperature (Celsius)\n temperature[ii] = atmTemp[match]\n\n # Pressure at the observer (millibar)\n # Should be around 760.0 millibars\n pressure[ii] = atmPressure[match]\n\n # Relative humidity (%)\n # Should be around 0.1 %\n humidity[ii] = atmHumidity[match]\n\n # Wind speed (km/h)\n wind_speed[ii] = atmWindSpeed[match]\n\n # Wind direction (degrees)\n wind_dir[ii] = atmWindDir[match]\n\n return temperature, pressure, humidity, wind_speed, wind_dir",
"def add_data2daily_netcdf_met(half_hourly_nc, daily_nc):\n hh_data = nC.Dataset(half_hourly_nc, 'r')\n d_data = nC.Dataset(daily_nc, 'a')\n # half hourly data\n hh_times = hh_data.variables['time']\n hh_air_temps = hh_data.variables['air_temp']\n hh_soil_temps = hh_data.variables['soil_temp']\n hh_rg = hh_data.variables['rg']\n is_day = hh_data.variables['is_day']\n # daily data\n daily_times = d_data.variables['time']\n rg_day = d_data.variables['rg']\n daily_mean_temp = d_data.variables['daily_mean_temp']\n daily_max_temp = d_data.variables['daily_max_temp']\n daily_min_temp = d_data.variables['daily_min_temp']\n mean_temp_day = d_data.variables['mean_temp_day']\n mean_temp_night = d_data.variables['mean_temp_night']\n daily_mean_soil_temp = d_data.variables['daily_mean_soil_temp']\n mean_soil_temp_day = d_data.variables['mean_soil_temp_day']\n mean_soil_temp_night = d_data.variables['mean_soil_temp_night']\n doy = d_data.variables['doy']\n day_length = d_data.variables['day_length']\n night_length = d_data.variables['night_length']\n\n time_lst = nC.num2date(daily_times[:], daily_times.units)\n nc_doy(doy, daily_times)\n nc_day_len(is_day, day_length, hh_times, time_lst)\n nc_night_len(is_day, night_length, hh_times, time_lst)\n print 'times done'\n # update rg values\n daily_rg_values(hh_rg, rg_day)\n print 'rg done'\n # update daily air temps\n daily_temperatures(hh_air_temps, daily_mean_temp, daily_max_temp, daily_min_temp)\n nc_day_mean_temp(is_day, hh_air_temps, mean_temp_day, hh_times, time_lst)\n nc_night_mean_temp(is_day, hh_air_temps, mean_temp_night, hh_times, time_lst)\n print 'temps done'\n # update daily soil temps\n daily_soil_temperatures(hh_soil_temps, daily_mean_soil_temp)\n nc_day_mean_temp(is_day, hh_soil_temps, mean_soil_temp_day, hh_times, time_lst)\n nc_night_mean_temp(is_day, hh_soil_temps, mean_soil_temp_night, hh_times, time_lst)\n print 'soil temps done'\n hh_data.close()\n d_data.close()\n return 'yay'",
"def read_all(self) -> dict:\n readings = { 'Datetime' : dt.datetime.now().strftime('%m-%d-%Y %H:%M:%S'),\n 'Temp' : self.read_temp(),\n 'pH' : self.read_pH(),\n 'EC' : self.read_ec(),\n 'Moisture' : self.read_moisture(),\n 'NPK' : self.read_npk()}\n return readings",
"def read_prizm_data(first_ctime, second_ctime, dir_top,\r\n subdir_100='data_100MHz', subdir_70='data_70MHz',\r\n subdir_switch='switch_data', read_100=True, read_70=True,\r\n read_switch=True, read_temp=True, verbose=False):\r\n\r\n # Initializes the dictionary which will hold the data.\r\n prizm_data = {}\r\n\r\n # Lists the typical '*.scio' and '*.raw' file names and their respective\r\n # data types.\r\n scio_files = [\r\n 'pol0.scio', 'pol1.scio', 'cross_real.scio', 'cross_imag.scio',\r\n ]\r\n raw_files = [\r\n ('acc_cnt1.raw', 'int32'), ('acc_cnt2.raw', 'int32'),\r\n ('fft_of_cnt.raw', 'int32'), ('fft_shift.raw', 'int64'),\r\n ('fpga_temp.raw', 'float'), ('pi_temp.raw', 'int32'),\r\n ('sync_cnt1.raw', 'int32'), ('sync_cnt2.raw', 'int32'),\r\n ('sys_clk1.raw', 'int32'), ('sys_clk2.raw', 'int32'),\r\n ]\r\n switch_files = [\r\n 'antenna.scio', 'res100.scio', 'res50.scio', 'short.scio', 'noise.scio', 'open.scio'\r\n ]\r\n temp_files = [\r\n ('temp_100A_bot_lna.raw', 'float'), ('temp_100_ambient.raw', 'float'),\r\n ('temp_100A_noise.raw', 'float'), ('temp_100A_switch.raw', 'float'),\r\n ('temp_100A_top_lna.raw', 'float'), ('temp_100B_bot_lna.raw', 'float'),\r\n ('temp_100B_noise.raw', 'float'), ('temp_100B_switch.raw', 'float'),\r\n ('temp_100B_top_lna.raw', 'float'), ('temp_70A_bot_lna.raw', 'float'),\r\n ('temp_70_ambient.raw', 'float'), ('temp_70A_noise.raw', 'float'),\r\n ('temp_70A_switch.raw', 'float'), ('temp_70A_top_lna.raw', 'float'),\r\n ('temp_70B_bot_lna.raw', 'float'), ('temp_70B_noise.raw', 'float'),\r\n ('temp_70B_switch.raw', 'float'), ('temp_70B_top_lna.raw', 'float'),\r\n ('temp_pi.raw', 'float'), ('temp_snapbox.raw', 'float'),\r\n ('time_pi.raw', 'float'), ('time_start_therms.raw', 'float'),\r\n ('time_stop_therms.raw', 'float'),\r\n ]\r\n \r\n # Lists the old and new time '.raw' file names, their respective data types,\r\n # and their new file nomemclature.\r\n old_time_raw_files = [\r\n ('time_start.raw', 'float', 'time_sys_start.raw'),\r\n ('time_stop.raw', 'float', 'time_sys_stop.raw'),\r\n ('time_rtc_start.raw', 'float', 'time_rtc_start.raw'),\r\n ('time_rtc_stop.raw', 'float', 'time_rtc_stop.raw'),\r\n ]\r\n new_time_raw_files = [\r\n ('time_sys_start.raw', 'float', 'time_sys_start.raw'),\r\n ('time_sys_stop.raw', 'float', 'time_sys_stop.raw'),\r\n ('time_rtc_start.raw', 'float', 'time_rtc_start.raw'),\r\n ('time_rtc_stop.raw', 'float', 'time_rtc_stop.raw'),\r\n ]\r\n\r\n # Primary Data:\r\n # Checks whether `read_100` and `read_70` are `True`. If so, their\r\n # respective keys are stored in the list `antennas`, and also created as\r\n # entries for the `prizm_data` dictionary. The input subdirectories\r\n # `subdir_100` and `subdir_70` are also stored in the `subdirs` dictionary\r\n # for future manipulation.\r\n antennas = []\r\n subdirs = {}\r\n if read_100:\r\n antennas.append('100MHz')\r\n subdirs['100MHz'] = subdir_100\r\n if read_70:\r\n antennas.append('70MHz')\r\n subdirs['70MHz'] = subdir_70\r\n\r\n # Verbose message.\r\n if (verbose and len(antennas) > 0):\r\n print('Reading primary data from the ', antennas, 'atennas.')\r\n\r\n # Reads the primary data products for the 70 MHz and 100 Mhz antennas.\r\n for antenna in antennas:\r\n prizm_data[antenna] = {}\r\n dirs = dir_from_ctime(first_ctime,\r\n second_ctime,\r\n dir_top + '/' + subdirs[antenna]\r\n )\r\n\r\n # Reads all '.scio' files in `dirs` whose names match the entries in\r\n # `scio_files`. 
The results are stored in the appropriate antenna\r\n # dictionary entry of `prizm_data` with key given by the file name.\r\n for file_name in scio_files:\r\n prizm_data[antenna][file_name] = read_scio_file(dirs,\r\n file_name,\r\n verbose=verbose)\r\n\r\n # Checks whether `first_ctime` is smaller than 1524730000. If so,\r\n # attempts to read the timestamp information from those files listed in\r\n # `old_time_raw_files`. This step is needed because at that ctime in\r\n # 2018 the timestamp files were renamed from 'time_start.raw' and\r\n # 'time_stop.raw' to 'time_sys_start.raw' and 'time_sys_stop.raw'. Since\r\n # the timestamp information is essential for any analysis of the PRIZM\r\n # data, checking whether these older files are available is essential.\r\n # Notice that despite having different names, the data dictionary keys\r\n # referring to such files still reflect the more recent file\r\n # nomenclature in order to keep the resulting `prizm_data` dictionary\r\n # compatible with other functions defined in this module.\r\n if first_ctime < 1524730000:\r\n # Verbose message.\r\n if verbose:\r\n print('Attempting to read the older timestamp files.')\r\n\r\n # Reads all '.raw' files in `dirs` whose names match the entries in\r\n # `old_time_raw_files`. The results are stored in the appropriate\r\n # antenna dictionary entry of `prizm_data` under a key given by the\r\n # more recent file nomenclature associated with those files.\r\n for old_file_name, dtype, file_name in old_time_raw_files:\r\n prizm_data[antenna][file_name] = read_raw_file(dirs,\r\n old_file_name,\r\n verbose=verbose,\r\n dtype=dtype)\r\n else:\r\n # In case `first_ctime` does not fall within the time period which\r\n # corresponds to the older timestamp files, that means only newer\r\n # files will be read. The `prizm_data` timestamp entries are thus\r\n # initialized with an empty NumPy array. This guarantees the next\r\n # reading operation below can be performed even if no old timestamp\r\n # data has been yet recorded in `prizm_data`.\r\n for old_file_name, dtype, file_name in old_time_raw_files:\r\n prizm_data[antenna][file_name] = np.array([])\r\n\r\n # Checks whether `second_ctime` is larger than 1524730000. If so,\r\n # attempts to read the timestamp information from those files listed in\r\n # `new_time_raw_files`. Here the operation concatenates the output of\r\n # `read_raw_file` to the `prizm_data` timestamp entries initialized\r\n # above. This guarantees that the reading operation will work as\r\n # expected in all scenarios, i.e., when there is only old data, or\r\n # only new data, or a mix of old and new data.\r\n if second_ctime > 1524730000:\r\n # Verbose message.\r\n if verbose:\r\n print('Attempting to read the newer timestamp files.')\r\n\r\n # Reads all '.raw' files in `dirs` whose names match the entries in\r\n # `new_time_raw_files`. The results are temporatily stored in the\r\n # NumPy array `new_time_data`. It is then concatenated to the\r\n # timestamp information already stored or initialized in the antenna\r\n # dictionary entry of `prizm_data`.\r\n for new_file_name, dtype, file_name in new_time_raw_files:\r\n new_time_data = read_raw_file(dirs,\r\n new_file_name,\r\n verbose=verbose,\r\n dtype=dtype)\r\n prizm_data[antenna][file_name] = np.concatenate((\r\n prizm_data[antenna][file_name],\r\n new_time_data\r\n ))\r\n\r\n # Reads all remaining '.raw' files in `dirs` whose names match the\r\n # entried in `raw_files`. 
The results are stored in the appropriate\r\n # atenna dictionary entry of `prizm_data` with key given by the file\r\n # name.\r\n for file_name, dtype in raw_files:\r\n prizm_data[antenna][file_name] = read_raw_file(dirs,\r\n file_name,\r\n verbose=verbose,\r\n dtype=dtype)\r\n\r\n # Auxiliary Data:\r\n # Checks whether `read_switch` is `True`. If so, the key `switch` is added\r\n # to the `prizm_data` dictionary, creates the list of directories where the\r\n # switch data is located, and proceeds to read the data.\r\n if read_switch:\r\n prizm_data['switch'] = {}\r\n dirs = dir_from_ctime(first_ctime,\r\n second_ctime,\r\n dir_top + '/' + subdir_switch)\r\n\r\n # Verbose message.\r\n if verbose:\r\n print('Reading the auxiliary switch data.')\r\n\r\n # Reads all '.scio' files in `dirs` whose names match the entries in\r\n # `switch_files`. The results are stored as dictionaries in\r\n # `prizm_data['switch']`, with keys given by the file names being read.\r\n for file_name in switch_files:\r\n prizm_data['switch'][file_name] = read_scio_file(dirs,\r\n file_name,\r\n verbose=verbose)\r\n\r\n # Checks whether `read_temp` is `True`. If so, the key `temp` is added\r\n # to the `prizm_data` dictionary, creates the list of directories where\r\n # the temperature data is located, and proceeds to read the data.\r\n if read_temp:\r\n prizm_data['temp'] = {}\r\n dirs = dir_from_ctime(first_ctime,\r\n second_ctime,\r\n dir_top + '/' + subdir_switch)\r\n\r\n # Verbose message.\r\n if verbose:\r\n print('Reading the auxiliary temperature data.')\r\n\r\n # Reads all '.scio' files in `dirs` whose names match the entries in\r\n # `switch_files`. The results are stored as dictionaries in\r\n # `prizm_data['switch']`, with keys given by the file names being read.\r\n for file_name, dtype in temp_files:\r\n prizm_data['temp'][file_name] = read_raw_file(dirs,\r\n file_name,\r\n verbose=verbose,\r\n dtype=dtype)\r\n\r\n # Returns the `prizm_data` found in the given time range.\r\n return prizm_data",
"def read_f_RH(ceil_lam):\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH",
"def read_data_by_days(sc, path, start, end):\n day_list = get_day_range(start, end)\n print \"get data from {0} to {1}\".format(day_list[0], day_list[-1])\n day_paths = map(lambda x: \"{0}/day={1}\".format(path, x), day_list)\n day_paths_str = \",\".join(day_paths)\n rdd = sc.textFile(day_paths_str)\n return rdd",
"def get_data(datapath, tstart, tend):\n\n # Define empty streams\n trN1 = Stream()\n trN2 = Stream()\n trNZ = Stream()\n trNP = Stream()\n\n # Time iterator\n t1 = tstart\n\n # Cycle through each day within time range\n while t1 < tend:\n\n # Time stamp used in file name\n tstamp = str(t1.year).zfill(4)+'.'+str(t1.julday).zfill(3)+'.'\n\n # Cycle through directory and load files\n p = datapath.glob('*.*')\n files = [x for x in p if x.is_file()]\n for file in files:\n if fnmatch.fnmatch(str(file), '*' + tstamp + '*1.SAC'):\n tr = read(str(file))\n trN1.append(tr[0])\n elif fnmatch.fnmatch(str(file), '*' + tstamp + '*2.SAC'):\n tr = read(str(file))\n trN2.append(tr[0])\n elif fnmatch.fnmatch(str(file), '*' + tstamp + '*Z.SAC'):\n tr = read(str(file))\n trNZ.append(tr[0])\n elif fnmatch.fnmatch(str(file), '*' + tstamp + '*H.SAC'):\n tr = read(str(file))\n trNP.append(tr[0])\n\n # Increase increment\n t1 += 3600.*24.\n\n # Fill with empty traces if components are not found\n ntr = len(trNZ)\n if not trN1 and not trN2:\n for i in range(ntr):\n trN1.append(Trace())\n trN2.append(Trace())\n if not trNP:\n for i in range(ntr):\n trNP.append(Trace())\n\n if ntr > 0:\n # Check that all sampling rates are equal - otherwise resample\n if trNZ[0].stats.sampling_rate != trNP[0].stats.sampling_rate:\n\n # These checks assume that all seismic data have the same sampling\n if trNZ[0].stats.sampling_rate < trNP[0].stats.sampling_rate:\n trNP.resample(trNZ[0].stats.sampling_rate, no_filter=False)\n else:\n trNZ.resample(trNP[0].stats.sampling_rate, no_filter=False)\n if trN1:\n trN1.resample(trNP[0].stats.sampling_rate, no_filter=False)\n if trN2:\n trN2.resample(trNP[0].stats.sampling_rate, no_filter=False)\n\n return trN1, trN2, trNZ, trNP",
"def read_historicaldata():\r\n hist_rf = pd.read_csv('full2010-2018data.csv')\r\n hist_rf.Datetime = pd.to_datetime(hist_rf.Datetime) # This is the slow part of reading historical data. \r\n hist_rf.set_index('Datetime', inplace = True) \r\n \r\n # hist_rf['Date'] = hist_rf.index.date\r\n # hist_rf['Time'] = hist_rf.index.time\r\n # hist_rf['Year'] = hist_rf.index.year\r\n # hist_rf['Month'] = hist_rf.index.month \r\n # hist_rf['Day'] = hist_rf.index.day\r\n # hist_rf['Week'] = hist_rf.index.week\r\n \r\n return hist_rf",
"def _generate_external_data(self):\n # Reading external sources of data.\n weather = self._read_file(self._f_names['weather'])\n airports = self._read_file(self._f_names['airports'])\n gdp = self._read_file(self._f_names['gdp'])\n jet_fuel = self._read_file(self._f_names['jet_fuel'])\n jet_fuel[\"Date\"] = pd.to_datetime(jet_fuel[\"Date\"])\n\n # Adding some features to the weather data.\n weather.loc[:, 'holidate'] = HolidaysManager.to_holiday(weather.loc[:, 'Date'])\n weather.loc[:, 'is_holiday'] = weather.loc[:, 'holidate'].apply(HolidaysManager.is_holiday)\n weather.loc[:, 'is_beginning_holiday'] = weather.loc[:, 'holidate'].apply(HolidaysManager.is_beginning_holiday)\n weather.loc[:, 'is_end_holiday'] = weather.loc[:, 'holidate'].apply(HolidaysManager.is_end_holiday)\n\n # Get distance in days to the closest holiday\n weather[\"Date\"] = pd.to_datetime(weather[\"Date\"])\n weather[\"dumb1\"] = weather[\"Date\"][weather[\"is_holiday\"]]\n weather[\"dumb2\"] = weather[\"Date\"][weather[\"is_holiday\"]]\n weather[\"dumb1\"] = weather[\"dumb1\"].fillna(method=\"ffill\").fillna(method=\"bfill\")\n weather[\"dumb2\"] = weather[\"dumb2\"].fillna(method=\"bfill\").fillna(method=\"ffill\")\n weather[\"distance_to_previous\"] = pd.to_numeric(np.abs(weather[\"dumb1\"] - weather[\"Date\"]).dt.days)\n weather[\"distance_to_next\"] = pd.to_numeric(np.abs(weather[\"dumb2\"] - weather[\"Date\"]).dt.days)\n weather[\"holidays_distance\"] = np.minimum(weather.distance_to_previous, weather.distance_to_next)\n # print(weather.head())\n\n # weather.drop('holidate', axis=1, inplace=True)\n weather.drop(['holidate', 'dumb1', 'dumb2', 'distance_to_previous', 'distance_to_next'], axis=1, inplace=True)\n\n # Stripping the iso region from its prefix to allow a join on this column.\n airports.loc[:, 'iso_region'] = airports.loc[:, 'iso_region'].apply(\n lambda x: x.replace('US-', '')\n )\n\n # Merging the data to create external_data\n external_data = weather.merge(\n airports,\n how='left',\n left_on='AirPort',\n right_on='iata_code'\n )\n\n external_data = external_data.merge(\n gdp,\n how='left',\n left_on=['iso_region', 'municipality'],\n right_on=['state', 'city']\n )\n\n external_data = external_data.merge(\n jet_fuel,\n how='left',\n left_on=['Date'],\n right_on=['Date']\n )\n\n external_data['fuel_price'] = external_data['fuel_price'].fillna(method='ffill')\n\n rm_cols = [col for col in external_data.columns if 'Unnamed' in col]\n\n self._Data = external_data.drop(rm_cols, axis=1)\n self._Data = external_data.filter(\n self._ed_model_columns\n )",
"def read_data(fdh):\r\n print(\"READING FILTERED DATA\")\r\n arcpy.env.workspace = scratch\r\n fiber = []\r\n scs = []\r\n conduit = []\r\n vaults = []\r\n trench = []\r\n\r\n fcs = [\"final_fiber\", \"final_scs\", \"final_con\", \"final_vaults\", \"trench\"]\r\n cur_fields = [fiber_fields, sc_fields, con_fields, vault_fields, trench_fields]\r\n out_data = [fiber, scs, conduit, vaults, trench]\r\n # out_data = []\r\n i = 0\r\n domains = arcpy.da.ListDomains(scratch)\r\n for fc, req_fields, out_fc in zip(fcs, cur_fields, out_data):\r\n # print \"Read FINAL FC: {0}\".format(fc)\r\n out_data.append([])\r\n allfields = arcpy.ListFields(fc)\r\n field_map = {o.name.lower(): o for o in allfields}\r\n fields = [field_map[x.lower()] for x in req_fields]\r\n with arcpy.da.SearchCursor(scratch + \"\\\\\" + fc, [x.name for x in fields],\r\n \"fdhid like '{0}'\".format(fdh)) as cursor:\r\n for row in cursor:\r\n print(row)\r\n temp = [\"\" if x is None else (x if fields[ind].domain == \"\" else\r\n [y for y in domains if y.name == fields[ind].domain][0].codedValues[x])\r\n for ind, x in enumerate(row)]\r\n for i, z in enumerate(temp):\r\n try:\r\n temp[i] = float(z)\r\n except:\r\n temp[i] = z\r\n out_fc.append(temp)\r\n #print(\"Read FINAL FC: {0} len: {1}\".format(fc, len(out_fc)))\r\n\r\n return (fiber, scs, conduit, vaults, trench)",
"def data(ignore_date=False):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read in RH observations from KSSW, time-match them to the model data, and extend them in height to match the dimensions of the model RH
|
def read_wxt_obs(day, time, z):
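    """
    Read the 15-min WXT observations (RH, Tair, press) for `day`, time-match them to
    the model times in `time` (matches more than 1 hour apart are set to NaN), derive
    extra moisture variables, then tile every variable in height so its dimensions
    match the model fields defined on `z`.

    Parameter descriptions below are inferred from how the arguments are used:
    :param day: datetime of the day being processed (used to build the file name)
    :param time: array of model datetimes to match the observations onto
    :param z: model height array; only z.shape[-1] (number of levels) is used
    :return: wxt_obs dictionary of time-matched, height-extended observation arrays
    """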
filepath = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \
'Davis_BGH_' + day.strftime('%Y') + '_15min.nc'
wxt_obs = eu.netCDF_read(filepath, vars=['time', 'RH', 'Tair', 'press'])
# extract out RH obs to match mod_time
# pull out ALL the nearest time idxs and differences
# the mod_data time is the same for all sites so can therefore use any site
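    # eu.nearest appears to return (nearest value, index, time difference); only the
    # index ([1]) and the difference ([2]) are used below.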
t_idx = np.array([eu.nearest(wxt_obs['time'], t)[1] for t in time])
t_diff = np.array([eu.nearest(wxt_obs['time'], t)[2] for t in time])
wxt_obs['RH'] = wxt_obs['RH'][t_idx] # [%]
wxt_obs['Tair'] = wxt_obs['Tair'][t_idx] # [degC]
wxt_obs['press'] = wxt_obs['press'][t_idx] # [hPa]
wxt_obs['time'] = wxt_obs['time'][t_idx]
# wxt_obs['rawtime'] = wxt_obs['rawtime'][t_idx]
# overwrite t_idx locations where t_diff is too high with nans
# only keep t_idx values where the difference is below 1 hour
bad = np.array([abs(i.days * 86400 + i.seconds) > 60 * 60 for i in t_diff])
wxt_obs['RH'][bad] = np.nan
wxt_obs['Tair'][bad] = np.nan
wxt_obs['press'][bad] = np.nan
wxt_obs['time'][bad] = np.nan
# wxt_obs['rawtime'][bad] = np.nan
# create RH_frac using RH data
wxt_obs['RH_frac'] = wxt_obs['RH'] / 100.0
# calculate extra variables
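    # The expressions below follow standard forms (comments added for clarity):
    #   saturation vapour pressure (Magnus/Bolton): e_s = 6.112 exp(17.67 T / (T + 243.5)) [hPa], T in degC
    #   vapour pressure: e = RH_frac * e_s
    #   mixing ratio: r_v = e / (1.61 (p - e)) ~= 0.622 e / (p - e), since 1/1.61 ~= 0.622
    #   specific humidity: q = r_v / (1 + r_v), written out in terms of e below
    #   virtual temperature: Tv = (1 + 0.61 q) T, with T in kelvin
    #   air density from the ideal gas law: rho = p / (R_d Tv), with R_d taken here as 286.9 J kg-1 K-1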
e_s_hpa = 6.112 * (np.exp((17.67 * wxt_obs['Tair']) / (wxt_obs['Tair'] + 243.5))) # [hPa] # sat. v. pressure
e_s = e_s_hpa * 100.0 # [Pa] # sat. v. pressure
wxt_obs['e'] = wxt_obs['RH_frac'] * e_s # [Pa] # v. pressure
wxt_obs['r_v'] = wxt_obs['e'] / (1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) # water_vapour mixing ratio [kg kg-1]
wxt_obs['q'] = wxt_obs['e'] / ((1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) + wxt_obs['e']) # specific humidity [kg kg-1]
wxt_obs['Tv'] = (1 + (0.61 * wxt_obs['q'])) * (wxt_obs['Tair'] + 273.15) # virtual temp [K]
    wxt_obs['air_density'] = (wxt_obs['press']*100.0) / (286.9 * wxt_obs['Tv'])  # [kg m-3]
# extend the wxt obs in height to match the dimensions of model RH
# copy the obs so it is the same at all heights
for var, item in wxt_obs.iteritems():
if var not in ['time', 'rawtime']:
# wxt_obs[var] = np.transpose(np.tile(item, (int(rh_frac.shape[1]), 1)))
wxt_obs[var] = np.transpose(np.tile(item, (int(z.shape[-1]), 1)))
return wxt_obs
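
A minimal usage sketch for the function above (not part of the original module): the date, model times, and dummy height grid are illustrative, and running it assumes numpy is imported as np, the ellUtils package as eu, and that the Davis_BGH netCDF file exists on the hard-coded path.

# Hypothetical usage -- illustrative inputs only
import datetime as dt
day = dt.datetime(2016, 5, 4)                                          # illustrative date
mod_time = np.array([day + dt.timedelta(hours=h) for h in range(24)])  # hourly model times
mod_z = np.zeros((24, 70))                                             # dummy (time, height) model grid
wxt_obs = read_wxt_obs(day, mod_time, mod_z)
print(wxt_obs['RH_frac'].shape)                                        # (24, 70) after height tiling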
|
[
"def setup_hds(self):\n if self.hds_kperk is None or len(self.hds_kperk) == 0:\n return\n from .gw_utils import setup_hds_obs\n # if len(self.hds_kperk) == 2:\n # try:\n # if len(self.hds_kperk[0] == 2):\n # pass\n # except:\n # self.hds_kperk = [self.hds_kperk]\n oc = self.m.get_package(\"OC\")\n if oc is None:\n raise Exception(\"can't find OC package in model to setup hds grid obs\")\n if not oc.savehead:\n raise Exception(\"OC not saving hds, can't setup grid obs\")\n hds_unit = oc.iuhead\n hds_file = self.m.get_output(unit=hds_unit)\n assert os.path.exists(os.path.join(self.org_model_ws,hds_file)),\\\n \"couldn't find existing hds file {0} in org_model_ws\".format(hds_file)\n shutil.copy2(os.path.join(self.org_model_ws,hds_file),\n os.path.join(self.m.model_ws,hds_file))\n inact = None\n if self.m.lpf is not None:\n inact = self.m.lpf.hdry\n elif self.m.upw is not None:\n inact = self.m.upw.hdry\n if inact is None:\n skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x\n else:\n skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x\n print(self.hds_kperk)\n setup_hds_obs(os.path.join(self.m.model_ws,hds_file),\n kperk_pairs=self.hds_kperk,skip=skip)\n self.frun_post_lines.append(\"pyemu.gw_utils.apply_hds_obs('{0}')\".format(hds_file))\n self.tmp_files.append(hds_file)",
"def read_all_rh_obs(day, site_rh, rhDatadir, mod_data):\n\n # define array\n rh_obs = {}\n\n # get date string for obs of the main and following days\n doyStr = day.strftime('%Y%j')\n # doyStr2 = (day + dt.timedelta(hours=24)).strftime('%Y%j')\n\n for site, height in site_rh.iteritems():\n\n rh_obs[site] = {}\n\n # rh_fnames = [rhDatadir + site + '_' + doyStr + '_1min.nc',\n # rhDatadir + site + '_' + doyStr2 + '_1min.nc']\n\n rh_fnames = rhDatadir + site + '_' + doyStr + '_1min.nc'\n\n # read in all data\n data_obs = eu.netCDF_read(rh_fnames, vars=['RH', 'time'])\n data_obs['height'] = height\n\n # find nearest time in rh time\n # pull out ALL the nearest time idxs and differences\n t_idx = np.array([eu.nearest(data_obs['time'], t)[1] for t in mod_data[mod_data.keys()[0]]['time']])\n t_diff = np.array([eu.nearest(data_obs['time'], t)[2] for t in mod_data[mod_data.keys()[0]]['time']])\n\n # extract hours\n rh_obs[site]['RH'] = data_obs['RH'][t_idx]\n rh_obs[site]['height'] = data_obs['height']\n rh_obs[site]['time'] = [data_obs['time'][i] for i in t_idx]\n\n # overwrite t_idx locations where t_diff is too high with nans\n # only keep t_idx values where the difference is below 5 minutes\n bad = np.array([abs(i.days * 86400 + i.seconds) > 10 * 60 for i in t_diff])\n rh_obs[site]['RH'][bad] = np.nan\n\n # change flags to nans\n rh_obs[site]['RH'][np.where(rh_obs[site]['RH'] < 0)] = np.nan\n\n return rh_obs",
"def read_f_RH(mod_time, ceil_lam):\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename, vars=['RH', 'f(RH) MURK', 'radii_range'])\n return f_RH",
"def read_hourly_f_RH(mod_time, ceil_lam):\n\n import sys.platform as platform\n\n # file name and path\n if platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH)_NK_'+str(ceil_lam)+'nm.nc'\n\n # read data\n # f_RH = netCDF_read(miedir + filename, vars=['Relative Humidity', 'f(RH) MURK', 'radii_range_nm'])\n f_RH = netCDF_read(miedir + filename)\n return f_RH",
"def write_harmonisation_residual_files(self, directory,\r\n software, software_version, software_tag, job_id, matchup_dataset,\r\n lm, k_res, H_res=None):\r\n\r\n total = 0\r\n idx = [0]\r\n for n in lm[:, 2]:\r\n total += n\r\n idx.append(total)\r\n n_mu = idx[-1]\r\n\r\n # Write file for each match-up series\r\n for i, pair in enumerate(lm):\r\n\r\n # Get required data from lm variable\r\n sensor_i = pair[0]\r\n sensor_j = pair[1]\r\n n_mu = pair[2]\r\n\r\n istart = idx[i]\r\n iend = idx[i+1]\r\n\r\n # define file path\r\n fname = \"_\".join((\"harm\", software, software_version, software_tag, job_id,\r\n matchup_dataset, \"res\", str(sensor_i), str(sensor_j))) + \".nc\"\r\n path = pjoin(directory, fname)\r\n\r\n # open netCDF file\r\n rootgrp = Dataset(path, 'w')\r\n\r\n # set attributes\r\n rootgrp.software = software\r\n rootgrp.software_version = software_version\r\n rootgrp.software_tag = software_tag\r\n rootgrp.job_id = job_id\r\n rootgrp.matchup_dataset = matchup_dataset\r\n rootgrp.sensor_i_name = sensor_i\r\n rootgrp.sensor_j_name = sensor_j\r\n\r\n # create dimensions\r\n m = rootgrp.createDimension('m', n_mu)\r\n if H_res is not None:\r\n n_col = rootgrp.createDimension('n_col', H_res.shape[1])\r\n\r\n # create variables\r\n\r\n # > Harmonisation match-up adjustment factor residuals\r\n k_res_var = rootgrp.createVariable('k_res', 'f8', ('m',), zlib=True, complevel=9)\r\n k_res_var.description = \"k residuals\"\r\n\r\n if H_res is not None:\r\n # > Harmonisation match-up date residuals\r\n H_res_var = rootgrp.createVariable('H_res', 'f8', ('m', 'n_col',), zlib=True, complevel=9)\r\n H_res_var.description = \"Data residuals\"\r\n\r\n # store data\r\n k_res_var[:] = k_res[istart:iend]\r\n if H_res is not None:\r\n H_res_var[:, :] = H_res[istart:iend, :]\r\n\r\n # close netCDF file\r\n rootgrp.close()\r\n\r\n return 0",
"def read_shef():\r\n print '[info]: Reading water levels from SHEF data bank, converting to LMSL.'\r\n shef = parse_shef(time_list())\r\n shef = QC(shef)\r\n data = generate_bias_list(shef)\r\n convert_table = load_meter_mllw_to_msl()\r\n obs=defaultdict(lambda: defaultdict(list))\r\n\r\n for item in data.keys():\r\n ids = data[item]['COOPS-ID']\r\n obs[ids]['NWS_ID'] = data[item]['SHEF-ID']\r\n obs[ids]['lon'] = data[item]['lon']\r\n obs[ids]['lat'] = data[item]['lat']\r\n obs[ids]['dates'] = shef[item].keys()\r\n num = float(convert_table[item]['factor'])\r\n obs[ids]['values'] = np.multiply(0.3048,shef[item].values()) + num\r\n return obs",
"def get_model(model,fc_date,init_date=None,leadtime=None):\n from misc import haversine\n from model_specs import model_dict\n print (\"Get model data according to selected date ....\")\n if init_date is None:\n print (\"leadtime:\",leadtime,\"h\")\n else:\n print (\"init_date:\",init_date)\n print (\"fc_date:\",fc_date)\n if model == 'ARCMFC':\n filestr = (model_dict[model]['path']\n + fc_date.strftime('%Y%m%d')\n + init_date.strftime(model_dict[model]['file_template']))\n elif (model == 'mwam4' or model=='mwam8'):\n if fc_date == init_date:\n filestr = (init_date.strftime(model_dict[model]['path_template'])\n + init_date.strftime(model_dict[model]['file_template']))\n else:\n if leadtime%6!=0:\n print (\"leadtime needs to be multiple of 6h\")\n print (\"exit loop ...\")\n #sys.exit()\n else:\n tmpdate = fc_date - timedelta(hours=leadtime)\n filedate = tmpdate\n filestr = (filedate.strftime(model_dict[model]['path_template'])\n + filedate.strftime(model_dict[model]['file_template']))\n del tmpdate\n print (filestr)\n f = netCDF4.Dataset(filestr,'r')\n model_lons = f.variables[model_dict[model]['lons']][:]\n model_lats = f.variables[model_dict[model]['lats']][:]\n model_time = f.variables[model_dict[model]['time']][:]\n # Hs [time,lat,lon]\n model_Hs = f.variables[model_dict[model]['Hs']][:].squeeze()\n f.close()\n model_basetime = model_dict[model]['basetime']\n model_time_dt=[]\n for element in model_time:\n model_time_dt.append(model_basetime\n + timedelta(seconds=element))\n model_time_dt_valid = [model_time_dt[model_time_dt.index(fc_date)]]\n model_hs_valid = model_Hs[model_time_dt.index(fc_date),:,:]\n return model_time_dt, model_hs_valid, model_lons, model_lats",
"def calc_Q_ext_wet(ceil_lam, r_md, RH):\n\n from ellUtils import nearest\n\n def read_f_RH(ceil_lam):\n \"\"\"\n Read in the f_RH data from csv\n EW 21/02/17\n\n :param filename:\n :return: data = {RH:... f_RH:...}\n\n filename must be in the form of 'calculated_ext_f(RH)_[ceil_lambda]nm.csv'\n \"\"\"\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH\n\n def read_Q_dry_ext(ceil_lam):\n \"\"\"\n Read in the Q_ext for dry murk.\n EW 21/02/17\n\n :param filename:\n :param lam:\n :return: Q_ext_dry = {radius:... Q_ext_dry:...}\n\n Requres the wavelength to be passed, just so in the future, the 910 nm file is not incorrectly used by mistake when\n it should use the file for another wavelength.\n \"\"\"\n\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n filename = 'calculated_Q_ext_' + str(ceil_lam) + 'nm.csv'\n\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n Q_ext_dry = {'radius': raw[:, 0],\n 'Q_ext': raw[:, 1]}\n\n return Q_ext_dry\n\n RH_factor = 0.01 # Relative Humidity in 0.38 not 38%\n\n # calculate Q_ext_wet\n f_RH = read_f_RH(ceil_lam)\n Q_ext_dry = read_Q_dry_ext(ceil_lam)\n\n # create matric of Q_ext_dry based on r_md\n Q_ext_dry_matrix = np.empty(r_md.shape)\n f_RH_matrix = np.empty(RH.shape)\n\n # find Q_ext dry, given the dry radius matrix\n if r_md.size != 1:\n for i in range(r_md.shape[0]):\n idx = nearest(Q_ext_dry['radius'], r_md[i])[1]\n Q_ext_dry_matrix[i] = Q_ext_dry['Q_ext'][idx]\n\n else:\n idx = nearest(Q_ext_dry['radius'], r_md)[1]\n Q_ext_dry_matrix = Q_ext_dry['Q_ext'][idx]\n\n # find f(RH), given the RH matrix\n # need RH factor as f_RH['RH'] in units of frac not percentage\n if RH.size != 1:\n for i in range(RH.shape[0]):\n idx = nearest(f_RH['RH'], RH_factor * RH[i])[1]\n f_RH_matrix[i] = f_RH['f_RH'][idx]\n else:\n idx = nearest(f_RH['RH'], RH_factor * RH)[1]\n f_RH_matrix = f_RH['f_RH'][idx]\n\n # calculate Q_ext_wet\n Q = Q_ext_dry_matrix * f_RH_matrix\n # print np.mean(Q_ext_dry_matrix[:,:20])\n\n return Q, Q_ext_dry_matrix, f_RH_matrix",
"def convert_to_hic_format(self):\n\n if self.cfg.tal_mode == \"wt\":\n hek_mat = pd.read_csv(self.hek_file, sep=\"\\t\")\n elif self.cfg.tal_mode == \"tal1_ko\":\n hek_mat = pd.read_csv(self.tal1ko_file, sep=\"\\t\")\n elif self.cfg.tal_mode == \"lmo2_ko\":\n hek_mat = pd.read_csv(self.lmo2ko_file, sep=\"\\t\")\n\n \"get positions\"\n index, chr_list = self.change_index(list(hek_mat.index))\n columns, _ = self.change_index(hek_mat.columns)\n\n \"assign rows, columns and chr\"\n hek_mat.index = index\n hek_mat.columns = columns\n hek_mat[\"chr\"] = chr_list\n\n \"get matrices for TAL1 and LMO2\"\n tal1_mat = hek_mat.loc[hek_mat[\"chr\"] == \"chr1\"]\n tal1_mat = tal1_mat.iloc[:, 0:285]\n lmo2_mat = hek_mat.loc[hek_mat[\"chr\"] == \"chr11\"]\n lmo2_mat = lmo2_mat.iloc[:, 286:632]\n tal1_mat = tal1_mat.groupby(level=0, axis=1).sum()\n tal1_mat = tal1_mat.groupby(level=0, axis=0).sum()\n lmo2_mat = lmo2_mat.groupby(level=0, axis=1).sum()\n lmo2_mat = lmo2_mat.groupby(level=0, axis=0).sum()\n\n \"prepare data in the form of Hi-C\"\n tal_i = list(tal1_mat.index)\n tal_j = tal1_mat.columns\n lmo2_i = list(lmo2_mat.index)\n lmo2_j = lmo2_mat.columns\n\n tal_df = pd.DataFrame(columns=[\"i\", \"j\", \"v\"])\n for i in tal_i:\n for j in tal_j:\n tal_df = tal_df.append({\"i\": i, \"j\": j, \"v\": tal1_mat.loc[i][j]}, ignore_index=True)\n\n lmo2_df = pd.DataFrame(columns=[\"i\", \"j\", \"v\"])\n for i in lmo2_i:\n for j in lmo2_j:\n lmo2_df = lmo2_df.append({\"i\": i, \"j\": j, \"v\": lmo2_mat.loc[i][j]}, ignore_index=True)\n\n \"save data\"\n if self.cfg.tal_mode == \"wt\":\n tal_df.to_csv(cfg.hic_path + cfg.cell + \"/tal_df.txt\", sep=\"\\t\")\n lmo2_df.to_csv(cfg.hic_path + cfg.cell + \"/lmo2_df.txt\", sep=\"\\t\")\n else:\n tal_df.to_csv(cfg.output_directory + \"tal1_ko.txt\", sep=\"\\t\")\n lmo2_df.to_csv(cfg.output_directory + \"lmo2_ko.txt\", sep=\"\\t\")",
"def read_f_RH(ceil_lam):\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH",
"def ELRscript(model,mon,fday,fyr,day1,day2,nday,hdate_last,lit,liti,wk,nla1,sla1,wlo1,elo1,nla2,sla2,wlo2,elo2,fprefix,mpref,training_season,ntrain,rainfall_frequency,MOS):\n\n#%% model Hindcasts \n\tfh_xh = Dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc', mode='r')\n\tfh_yh = Dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc', mode='r')\n\n\tlons = fh_xh.variables['X'][:]\n\tlats = fh_xh.variables['Y'][:]\n\n\tx = fh_xh.variables['tp'][:]; x = np.squeeze(x)\n\ty = fh_yh.variables['tp'][:]\n\tndat1, nlat, nlon = np.shape(x)\n\tx1=x[:,1,1]\n\tI = np.where(x1>10000)\n\tbad_value_num=len(x1[I])\n\tndat=ndat1-bad_value_num\n\n#%% ELR: Train the models\n# Make a dictionary to contain the 'LogisticRegression' objects and terciles\n\telr_dict = {} # create an empty dictionary\n\telr_climo_dict = {} # create an empty dictionary for the climo forecast\n\n\tym = np.mean(y,axis=0)\n\tmsk = ma.getmask(ym)\n\tindex_land = np.empty((nlat,nlon),dtype=int)\n\txm0 = x\n\t#xm = xm0[0:int(ndat/2),:,:]\n\txm = xm0[0:lit,:,:]\n\n\tx0 = np.zeros(np.shape(xm)) # array of zeros to construct the climo forecast\n\tijland = -1\n\tfor j in range(nlat):\n\t# print(\"in j loop, j=\", j)\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\tindex_land[j,i] = ijland # index of land points\n\t\t\t\t#elr_dict[ijland] = elr_fit(xm[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\t#elr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\telr_dict[ijland] = elr_fit(xm[:,j,i], y[0:lit,j,i])\n\t\t\t\telr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:lit,j,i])\n\t\t\t# ijland is the dictionary key that can be used to assess the entries, like this\n\t\t\t# mymodel, mytercs = mydict[0]\n\t\t\t# mymodel.coef_\n\tnland = ijland+1\n\t#print('ELR training done with total landpoints = ',nland)\n\n\t#%% Make set of ELR in-sample hindcasts (no XV)\n\t#elr_hc = np.empty((ndat,nlat,nlon,3)); elr_hc.fill(np.nan)\n\t#elr_hc = np.empty((int(ndat/2),nlat,nlon)); elr_hc.fill(np.nan)\n\telr_hc = np.empty((lit,nlat,nlon)); elr_hc.fill(np.nan)\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\t#elr_hc[:,j,i,:] = elr_tercilesPredict(xm[:,j,i], terciles, elrmodel)\n\t\t\t\telr_hc[:,j,i] = elr_quantilePredict(xm[:,j,i], elrmodel)\n\n# ijland = index_land[lat1, lon1]\n# elrmodel, terciles = elr_dict[ijland]\n# elrmodel_climo, terciles = elr_climo_dict[ijland]\n# poe, q_fcst, q_clim, = elr_poe( xm[idat,lat1,lon1], elrmodel, elrmodel_climo )\n# plt.figure()\n\n\t#print('Set of ELR hindcasts made on a map of xy gridpoints')\n#---------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\t#T=int(ndat/2)\n\tT=lit\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(elr_hc, axis=1)\n\tvar[np.isnan(var)]=-999. 
#use CPT missing value\n\tdss=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tTarr=np.empty(ndat,dtype=int)\n\tfor it in range(ndat):\n\t\tTarr[it]=1901+it\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(y[0:lit,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tT1=lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\tndat_fc = ndat-lit\n\txf = x[lit:ndat,:,:]\n\tyf = y[lit:ndat,:,:]\n\n#%% Verification period\n########################################\n\n\telr_fc = np.empty((ndat_fc,nlat,nlon,3)); elr_fc.fill(np.nan)\n\trpss_ELR_fc = np.ma.array(np.empty((nlat,nlon)), mask=msk, fill_value=np.nan)\n\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\telr_fc[:,j,i,:] = elr_tercilesPredict(xf[:,j,i], terciles, elrmodel)\n\t#print('Set of ELR forcasts made on a map of xy gridpoints')\n\n#----------------------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_verification.txt'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tf.write(\"cpt:ncats=3\\n\")\n\tW=nlon\n\tH=nlat\n\tds=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\tT=ndat-lit\n\tTarr1=Tarr[lit:]\n\tXarr=lons\n\tYarr1=lats\n\tYarr=Yarr1[::-1] #Y should from N to S\n\tvari='tp'\n\tvar=np.flip(elr_fc, axis=1)*100\n\tvar[np.isnan(var)]=-1.0 #use CPT missing value\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:C=1, cpt:clim_prob=0.33333333333300003, cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=probability (%), cpt:missing=-1.0000000000000000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,0]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=2, 
cpt:clim_prob=0.33333333333400000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,1]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=3, cpt:clim_prob=0.33333333333299997\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,2]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_verification.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\t#var=np.flip(y[int(ndat/2):,:,:], axis=1)\n\tvar=np.flip(y[lit:,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\t#T1=int(ndat/2)\n\tT1=ndat-lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()",
"def display_hr_cams(self, model_hr=self.model, hr_path='hr_model.hdf5'):\n model_hr = model_hr\n hr_path = hr_path\n model_hr.load_weights(hr_path)\n \n print('Calculating HR hmp')\n hr_hmp, hr_pred, hr_w1 = get_heatmaps_hr(model_hr,self.images, self.layer_ids, self.pred_layer)\n for i in range(len(hr_hmp)):\n hr_hmp[i] = np.maximum(hr_hmp[i],0,hr_hmp[i])\n hr_hmp[i] = (hr_hmp[i]-hr_hmp[i].min())/(hr_hmp[i].max()-hr_hmp[i].min())\n\n idx = np.random.randint(0,len(_y))\n x_img = np.copy(_x[idx])\n y_img = np.copy(_y[idx])\n print('Idx: ',idx,' Class: ',y_img)\n\n plt.figure(figsize=(20, 20))\n\n plt.subplot(1, 2, 1)\n img = cv2.resize(np.squeeze(x_img[:,:,0]), tuple(input_shape[0:2]), 0, 0, 0, interpolation=cv2.INTER_LINEAR)\n plt.imshow(img, 'gray', interpolation='none')\n\n plt.subplot(1, 2, 2)\n output_0 = cv2.resize(np.squeeze(hr_hmp_0[idx]), tuple(input_shape[0:2]), 0, 0, 0, interpolation=cv2.INTER_LINEAR)\n plt.imshow(img, 'gray', interpolation='none')\n plt.imshow(output_0, 'jet', interpolation='none', alpha=0.50)\n\n #plt.show()\n plt.savefig('Hr_cam.png')",
"def get_data(self, key):\n if key not in Hdf5Reader.data_paths:\n raise KeyError('Dictionary key not in valid keys. Use get_data_by_path')\n\n hdf = H.File(self.file_name, 'r')\n\n if key == 'lb_velocity_x':\n data = hdf[Hdf5Reader.data_paths[key]][()][1]\n\n elif key == 'lb_velocity_y':\n data = hdf[Hdf5Reader.data_paths[key]][()][0]\n\n elif key == 'dlvo_x':\n data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\\n hdf[Hdf5Reader.data_paths['attractive_x']][()]\n # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\\n # hdf[Hdf5Reader.data_paths['lvdw_x']][()]\n data = data[0]\n\n elif key == 'dlvo_y':\n data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\\n hdf[Hdf5Reader.data_paths['attractive_y']][()]\n # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\\n # hdf[Hdf5Reader.data_paths['lvdw_y']][()]\n data = data[0]\n\n elif key == 'dlvo_fine':\n data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\\n hdf[Hdf5Reader.data_paths['attractive_fine']][()]\n data = data[0]\n\n elif key in ('lvdw_x', 'lvdw_y',\n 'lewis_x', 'lewis_y',\n 'edl_x', 'edl_y',\n 'dlvo_x', 'dlvo_y',\n 'attractive_x',\n 'attractive_y',\n 'distance_array',\n 'edl_fine',\n 'attractive_fine',\n 'distance_fine'):\n\n data = hdf[Hdf5Reader.data_paths[key]][()][0]\n\n else:\n data = hdf[Hdf5Reader.data_paths[key]][()]\n\n hdf.close()\n return data",
"def _read_wall_h5(self):\n self.walllabdict = {\"R_wall\":\"/wall/2d/R\", \"Z_wall\":\"/wall/2d/z\",\\\n \"divflag\":\"/wall/2d/divFlag\", \"segLen\":\"/wall/2d/segLen\"}\n self.w = dict.fromkeys(self.walllabdict)\n\n for k in self.walllabdict:\n self.w[k] = self.infile[self.walllabdict[k]][:]\n self.R_w = self.w['R_wall']\n self.z_w = self.w['Z_wall']",
"def thickness(filepaths, time, predictor):\n\n thickness = Camps_data('PLThick')\n international_units = { 'length' : 'm' }\n\n iu_unit = international_units.get('length')\n iu_pint = units.Quantity(1., iu_unit)\n\n unit = None\n try:\n unit = thickness.metadata['units']\n u_pint = units.Quantity(1., unit)\n assert(u_pint.dimensionality == iu_pint.dimensionality),\"Unit for pressure layer thickness in metadata has wrong dimensionality.\"\n except KeyError:\n logging.info(\"Metadata key \\'units\\' does not exist or has no value.\")\n logging.info(\"Adopt the unit of the fetched geopotential heights.\")\n pass\n\n thickness.metadata.update({'units' : unit})\n\n #Make a deep copy of the predictor object.\n pred = copy.deepcopy(predictor)\n\n #Sort the isobar values with pl haviing the lesser value.\n plevel1 = pred['search_metadata'].__getitem__('vert_coord1')\n plevel2 = pred['search_metadata'].__getitem__('vert_coord2')\n pl = plevel1\n pg = plevel2\n if pl > pg:\n pl = plevel2\n pg = plevel1\n thickness.add_vert_coord(pl,level2=pg,vert_type='plev')\n\n #Fetch the geopotential heights.\n pred['search_metadata'].update({'property' : parse_pred.observedProperty('GeoHght')})\n #The keys 'vert_coord2' and 'vert_method' are not needed to\n # retrieve the two geopotential heights.\n pred['search_metadata'].pop('vert_coord2')\n pred['search_metadata'].pop('vert_method')\n #------------------------------------------------------\n\n q_ght = units.Quantity(1., unit)\n #Fetch the geopotential height corresponding to the lesser isobar.\n pred['search_metadata'].update({'vert_coord1' : pl})\n ght_pl = read_var(filepath=filepaths, forecast_time=time, **pred['search_metadata'])\n assert(isinstance(ght_pl,Camps_data)),\"ght_pl expected to be camps data object\"\n mask = np.ma.getmaskarray(ght_pl.data)\n try:\n hpl_unit = ght_pl.metadata['units']\n hpl_pint = units.Quantity(1., hpl_unit)\n assert(hpl_pint.dimensionality == iu_pint.dimensionality),\"Unit of fetched geopotential height has wrong dimensionality.\"\n except KeyError:\n logging.info(\"Fetched geopotential height has no units!\")\n raise\n q_ghtPL = units.Quantity(ght_pl.data, hpl_unit)\n if unit is None:\n unit = hpl_unit\n thickness.metadata.update({ 'units' : unit })\n thickness.add_component(ght_pl)\n thickness.preprocesses = ght_pl.preprocesses\n\n #Fetch the geopotential height corresponding to the greater isobar.\n pred['search_metadata'].update({'vert_coord1' : pg})\n ght_pg = read_var(filepath=filepaths, forecast_time=time, **pred['search_metadata'])\n assert(isinstance(ght_pg,Camps_data)),\"ght_pg expected to be camps data object\"\n mask += np.ma.getmaskarray(ght_pg.data)\n try:\n hpg_unit = ght_pg.metadata['units']\n hpg_pint = units.Quantity(1., hpg_unit)\n assert(hpg_pint.dimensionality == iu_pint.dimensionality),\"Unit of fetched geopotential height has wrong dimensionality.\"\n except KeyError:\n logging.info(\"Fetched geopotential height has no units!\")\n raise\n q_ghtPG = units.Quantity(ght_pg.data, hpg_unit)\n thickness.add_component(ght_pg)\n for proc in ght_pg.preprocesses:\n thickness.add_preprocess(proc)\n\n# Copy the processes from ght_pg\n #thickness.processes = copy.deepcopy(ght_pg.processes)\n q_thick = (q_ghtPL - q_ghtPG).to(unit)\n thickness.add_dimensions('phenomenonTime')\n thickness.add_dimensions('y')\n thickness.add_dimensions('x')\n thickness.add_data(np.ma.array(np.array(q_thick), mask=mask))\n\n #Construct the pressure layer thickness camps data object.\n #The object is built by instantiating an object with 
empty substructures\n #and filling one substructure (metadata) with information in the\n #necessary metadata. We further fill in the substructures with\n #references to the fetched object ght_pl, and edit portions to be\n #relevant for the constructed predictor.\n thickness.location = ght_pl.location\n thickness.time = copy.deepcopy(ght_pl.time)\n thickness.metadata.update({'FcstTime_hour' : ght_pl.metadata.get('FcstTime_hour')})\n thickness.metadata.update({'PROV__hadPrimarySource' : ght_pl.metadata.get('PROV__hadPrimarySource')})\n thickness.add_process('PressLayerThickCalc')\n\n return thickness",
"def make_obslog():\n # load config file\n config = load_config('LHRS\\S*\\.cfg$')\n rawpath = config['data'].get('rawpath')\n\n statime_key = config['data'].get('statime_key')\n exptime_key = config['data'].get('exptime_key')\n\n # prepare logtable\n logtable = Table(dtype=[\n ('frameid', 'i2'),\n ('fileid', 'S12'),\n ('imgtype', 'S3'),\n ('object', 'S12'),\n ('exptime', 'f4'),\n ('obsdate', 'S19'),\n ('nsat', 'i4'),\n ('q95', 'i4'),\n ])\n\n fmt_str = ' - {:11s} {:5s} {:<12s} {:>7} {:^23s} {:>7} {:>5}'\n head_str = fmt_str.format('fileid', 'type', 'object', 'exptime',\n 'obsdate', 'nsat', 'q95')\n print(head_str)\n\n # start scanning the raw files\n for fname in sorted(os.listdir(rawpath)):\n mobj = re.match('(LHRS\\d{6}[A-Z])(\\d{4})([a-z])\\.fits', fname)\n if not mobj:\n continue\n filename = os.path.join(rawpath, fname)\n data, head = fits.getdata(filename, header=True)\n\n frameid = int(mobj.group(2))\n fileid = mobj.group(1) + mobj.group(2) + mobj.group(3)\n exptime = head[exptime_key]\n\n # guess object name from filename\n if mobj.group(3)=='b':\n objectname = 'Bias'\n elif mobj.group(3)=='f':\n objectname = 'Flat'\n else:\n objectname = ''\n\n obsdate = head[statime_key]\n if mobj.group(3)=='o':\n imgtype = 'sci'\n else:\n imgtype = 'cal'\n\n # determine the total number of saturated pixels\n saturation = (data>=44000).sum()\n\n # find the 95% quantile\n quantile95 = int(np.round(np.percentile(data, 95)))\n\n item = [frameid, fileid, imgtype, objectname, exptime, obsdate,\n saturation, quantile95]\n logtable.add_row(item)\n\n item = logtable[-1]\n\n # print log item with colors\n string = fmt_str.format(fileid,\n '({:3s})'.format(imgtype), objectname, exptime,\n obsdate, saturation, quantile95)\n print(print_wrapper(string, item))\n\n # sort by obsdate\n #logtable.sort('obsdate')\n\n # determine filename of logtable.\n # use the obsdate of the first frame\n obsdate = logtable[0]['obsdate'][0:10]\n outname = 'log.{}.txt'.format(obsdate)\n if os.path.exists(outname):\n i = 0\n while(True):\n i += 1\n outname = 'log.{}.{}.txt'.format(obsdate, i)\n if not os.path.exists(outname):\n outfilename = outname\n break\n else:\n outfilename = outname\n\n # set display formats\n logtable['imgtype'].info.format = '^s'\n logtable['fileid'].info.format = '<s'\n logtable['object'].info.format = '<s'\n logtable['exptime'].info.format = 'g'\n\n # save the logtable\n outfile = open(outfilename, 'w')\n for row in logtable.pformat_all():\n outfile.write(row+os.linesep)\n outfile.close()",
"def _expand_x_t(data_dir, n_data, times):\n with h5py.File(data_dir + \"/lhs{}_t{}.hdf5\".format(n_data, len(times)), 'r') as f:\n x_train = f['input'][()]\n y_train = f['output'][()]\n times = f['times'][()]\n\n # convert input data format (N, 1, iH, iW) --> (T * N, 1, iH, iW)\n x_train_aug = np.zeros((len(times) * x_train.shape[0], *x_train.shape[1:]))\n times_aug = np.zeros(len(times) * x_train.shape[0])\n # (x, t): loop t first\n for i in range(x_train.shape[0]):\n x_train_aug[i * len(times): (i + 1) * len(times)] = x_train[i]\n times_aug[i * len(times): (i + 1) * len(times)] = times\n print(\"total input shape: {}\".format(x_train_aug.shape))\n print(\"total times shape: {}\".format(times_aug.shape))\n\n # convert output data format (N, T * oC, oH, oW) --> (T * N, oC, oH, oW)\n y_train_aug = np.zeros((len(times) * y_train.shape[0],\n y_train.shape[1] // len(times),\n *y_train.shape[2:]))\n\n for i in range(y_train.shape[0]):\n y_temp = []\n for j in range(len(times)):\n # T x oC x oH x oW\n indices = [j + len(times) * k for k in range(y_train_aug.shape[1])]\n y_temp.append(y_train[i, indices])\n y_train_aug[i * len(times): (i + 1) * len(times)] = np.stack(y_temp)\n print(\"total output data shape: {}\".format(y_train_aug.shape))\n\n with h5py.File(data_dir + \"/lhs{}_t{}_expanded.hdf5\".format(n_data, len(times)),\n 'w') as f:\n input = f.create_dataset(name='input', data=x_train_aug, dtype='f',\n compression='gzip')\n print(input.shape)\n print(input[-1])\n output = f.create_dataset(name='output', data=y_train_aug, dtype='f',\n compression='gzip')\n print(output.shape)\n print(output[-1])\n times = f.create_dataset(name='times', data=times_aug, dtype='f')\n print(times[()])",
"def twophotonHRead():\n xuvtop = os.environ['XUVTOP']\n fName = os.path.join(xuvtop, 'continuum', 'hseq_2photon.dat')\n dFile = open(fName, 'r')\n a = dFile.readline()\n y0 = np.asarray(a.split())\n a = dFile.readline()\n z0 = np.asarray(a.split())\n nz = 30\n avalue = np.zeros(nz, 'float64')\n asum = np.zeros(nz, 'float64')\n psi0 = np.zeros((nz, 17), 'float64')\n for iz in range(nz):\n a = dFile.readline().split()\n avalue[iz] = float(a[1])\n asum[iz] = float(a[2])\n psi = np.asarray(a[3:])\n psi0[iz] = psi\n dFile.close()\n return {'y0':y0, 'z0':z0, 'avalue':avalue, 'asum':asum, 'psi0':psi0.reshape(30, 17)}",
"def _parse_hdus(cls, hdulist):\n # Open file with PyFITS\n fits_record = hdulist[1].data\n\n metadata = MetaDict(OrderedDict(hdulist[0].header))\n start_str = metadata.get('date-obs', metadata.get('date_obs', ''))\n start = parse_time(start_str)\n\n # First column are times. For level 2 data, the units are [s].\n # For level 3 data, the units are [min]\n if hdulist[1].header['TUNIT1'] == 's':\n times = start + TimeDelta(fits_record.field(0)*u.second)\n elif hdulist[1].header['TUNIT1'] == 'MIN':\n td = [int(n) for n in fits_record.field(0)]\n times = start + TimeDelta(td*u.minute)\n else:\n raise ValueError(\"Time unit in LYRA fits file not recognised. \"\n \"Value = {}\".format(hdulist[1].header['TUNIT1']))\n\n # Rest of columns are the data\n table = {}\n\n for i, col in enumerate(fits_record.columns[1:-1]):\n # temporary patch for big-endian data bug on pandas 0.13\n if fits_record.field(i+1).dtype.byteorder == '>' and sys.byteorder == 'little':\n table[col.name] = fits_record.field(i + 1).byteswap().newbyteorder()\n else:\n table[col.name] = fits_record.field(i + 1)\n\n # Return the header and the data\n times.precision = 9\n data = pandas.DataFrame(table, index=times.isot.astype('datetime64'))\n data.sort_index(inplace=True)\n\n # Add the units data\n units = OrderedDict([('CHANNEL1', u.W/u.m**2),\n ('CHANNEL2', u.W/u.m**2),\n ('CHANNEL3', u.W/u.m**2),\n ('CHANNEL4', u.W/u.m**2)])\n # TODO: check: http://www.wmo-sat.info/oscar/instruments/view/733\n return data, metadata, units"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calibrate the BSC (backscatter) observations
|
def calibrate_BSC_data_v1p0(bsc_obs, site):
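    """
    Apply the daily window-transmission calibration (c_pro) to the ceilometer
    backscatter observations for `site`, profile by profile.

    Parameter descriptions below are inferred from how the arguments are used:
    :param bsc_obs: dict keyed by site, each entry holding 'time' (datetimes) and
                    'backscatter' (time x range array)
    :param site: site key used both to index bsc_obs and to build the calibration
                 file name (e.g. the 'CL31-A_KSS45W' style)
    :return: bsc_obs with each backscatter profile scaled by that day's c_pro value
    """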
calib_path = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/' \
'Calibrations_for_LUMO_Ceilometers/'
filename = calib_path + site + '_window_trans_daily_cpro.pickle'
    # sort site name out (is in CL31-A_BSC_KSS45W format, but needs to be CL31-A_KSS45W)
# load calibration data (using pickle)
with open(filename, 'rb') as handle:
window_trans_daily = pickle.load(handle)
    for i, time_i in enumerate(bsc_obs[site]['time']):
# find date in window_trans_daily
time_idx = np.where(np.array(window_trans_daily['dates']) == time_i.date())
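        # np.where returns a tuple of index arrays, so c_pro[time_idx] is a length-1
        # array when the date appears (once) in the calibration record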
# apply calibration to bsc data
bsc_obs[site]['backscatter'][i, :] *= window_trans_daily['c_pro'][time_idx]
return bsc_obs
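
A hedged usage sketch for the calibration function above: the site key and the dummy bsc_obs dictionary are illustrative (their structure is assumed from how the function indexes them), and running it needs numpy, pickle, and the window-transmission pickle file on the hard-coded path.

# Hypothetical usage -- illustrative inputs only
import datetime as dt
site = 'CL31-A_KSS45W'                                                  # illustrative site key
bsc_obs = {site: {'time': np.array([dt.datetime(2016, 5, 4, h) for h in range(24)]),
                  'backscatter': np.ones((24, 770))}}                   # dummy time x range profiles
bsc_obs = calibrate_BSC_data_v1p0(bsc_obs, site)                        # scales each profile by that day's c_pro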
|
[
"def __calibrate(self):\n my_name = '__calibrate'\n\n # check that transient members are present\n if not (hasattr(self, \"_Spectrum__wave_solution\")):\n self.__calibrationWaveSolution()\n\n # calibrate\n self.__wavelength = self.__wave_solution(np.arange(self.__spectrum.size))\n self.__calibrated = True",
"def calibrate_all(self):\n print(\"disconnect battery and press Enter\")\n input()\n self._set_all(self.motor_max)\n print(\"connect battery and press enter\")\n input()\n time.sleep(2)\n self._set_all(self.motor_min)\n time.sleep(8)\n print(\"calibrated\")",
"def calibscin(Barrel_VectorSignals):\n\t#scincalibrations = [415.46911408615017, 415.49032413530045, 414.2257295618793, 413.52149204021396, 411.5992702498399, 410.64900009885315, 410.05084897192495, 410.2933477391751, 411.3409180068102, 412.31065478145797, 410.66077419019877, 411.16318351136084, 411.44777420544165, 411.9325417788443, 411.37997153809016, 411.388755835253, 411.8810225437739, 411.58323678675737, 411.40360405115445, 411.4363536068476, 411.1434892175137, 411.0311309144043, 411.45278182281913, 411.295529214398, 410.66722584949844, 411.95872061830596, 411.5134821865462, 411.8234427877747, 411.0133769672026, 410.49154689961557, 411.8200149562403, 411.4650824673985, 410.2795836068218, 410.99784486503626, 411.3719137475675, 411.1496647172716, 411.10361391486657, 411.01421880697376, 410.597406149097, 410.8673343110665, 410.4613021375798, 410.9360845492963, 410.9528787632441, 410.8574880829973, 411.03315409936096, 410.9379153502092, 411.01901911573697, 410.80356948239154, 411.0444263559407, 410.84313941730574, 410.94072813786545, 410.204939014593, 410.9738410909085, 410.3391884191118, 410.6721258934903, 410.51534130232534, 409.62060911658847, 408.77108688926836, 409.99777980214225, 409.5992379372193, 409.83732253845403, 410.210413105926, 409.1679944501218, 409.7662734551919, 408.11740676906703, 408.3924355876955, 408.03209385093214, 407.6136434771539, 407.49508269173134, 407.10864535201216, 406.49601594991765, 407.97586047362637, 407.6622587467224, 407.70767503648653, 408.04075414921584]*36\t\n\t#scincorrections = [411.41483745459146, 411.52181636082577, 410.25098543903624, 409.76481640097643, 407.8648162057763, 407.0433073672767, 406.4280147748465, 406.6202788727341, 407.6662499733772, 408.6662149874733, 407.04700222023894, 407.5469707080265, 407.8482602981088, 408.34396984724685, 407.8356135111425, 407.9006104165864, 408.4005126486694, 408.08624713560107, 408.0027574234192, 408.03512549563607, 407.7646802314756, 407.66902911324036, 408.13219631572156, 408.0856386283693, 407.38021535979243, 408.76602535322274, 408.3354792012611, 408.7493505933448, 407.97452518970755, 407.4711792144778, 408.78371452969293, 408.59189054866147, 407.3678692910586, 408.1980036194848, 408.6716381355258, 408.4772494588298, 408.48623697063414, 408.4283387274703, 408.0399995983036, 408.459433126158, 408.0009678326388, 408.4220840256861, 408.401124384891, 408.22498572575756, 408.2946388759221, 408.16241045483764, 408.2162208231712, 407.86038369360386, 408.10072483636185, 407.8308977000335, 407.8739032108822, 407.05402921668883, 407.81948812803313, 407.0379965972104, 407.2807418021031, 407.2472061094793, 406.2461313455679, 405.3115718891361, 406.46924320089, 406.04385491758575, 406.23962673351735, 406.485581819831, 405.37184790415176, 405.9200835792759, 404.174107208703, 404.3861928079304, 403.94869689782337, 403.3611910496827, 403.06973154521165, 402.5554299506423, 401.6688569907378, 402.8571098242905, 402.03193211770736, 401.6356578529606, 401.29776188137134]*36\n\t#containment = [0.9922257350540894, 0.9924029080295286, 0.9925314933889849, 0.9923710947914168, 0.9925005601234895, 0.9924571682663081, 0.9924736876499678, 0.9923107627997272, 0.9923717693883665, 0.992462552656521, 0.9924100067589308, 0.9924641871673259, 0.9923359607471853, 0.9924773952914967, 0.9924178141593918, 0.9924772080326696, 0.9922415412669103, 0.992379952512389, 0.9924161038441175, 0.9923936205889852, 0.9924236863162547, 0.9924316563576252, 0.9923568814774528, 0.9924405399594359, 0.9924611132742223, 0.9924930212297, 0.9923853174427201, 
0.9923924066611056, 0.9924806854716944, 0.992390053683815, 0.9923893436872515, 0.9924596118186407, 0.9924440389467973, 0.9923388445611183, 0.9924589198963458, 0.9924056855957695, 0.9923602278246142, 0.9924218711993472, 0.9922791098871626, 0.9924370426348486, 0.9923948825562914, 0.9924726323036805, 0.9924027753718472, 0.9924520521026018, 0.9924006741483085, 0.9923441256883894, 0.9925201121485623, 0.9924546188776329, 0.9924782088159386, 0.9923947843091397, 0.9924698150227844, 0.9922952793358721, 0.9924636652757733, 0.9924582419298134, 0.9924127350793948, 0.9924450958333008, 0.9924155137751113, 0.992411906630523, 0.9924154871234933, 0.992445097600654, 0.9924117857125934, 0.9924140566552296, 0.9924301803483659, 0.9924252926230659, 0.992427658103939, 0.9924930313657724, 0.9925539612633278, 0.9924001191478654, 0.9924655317037001, 0.9924423192903769, 0.9925128053546892, 0.9925077413435955, 0.9923661833622193, 0.9920001139857957, 0.9762479738306956]\n\ts_cont = [408.21638950554075, 408.3954472740771, 407.1870232421094, 406.63875945884087, 404.8060585388971, 403.97304819147996, 403.3691105878475, 403.49367909804056, 404.55647780600043, 405.58591491094637, 403.9575182245898, 404.4757730162475, 404.72249522199195, 405.272159576985, 404.74332809708255, 404.83205898107536, 405.23195412471205, 404.9766105533868, 404.9085068798063, 404.9314555180952, 404.67532710488985, 404.58364980855805, 405.012793566413, 405.0007315500301, 404.30902206187204, 405.6974274788762, 405.2261341502687, 405.63975175649347, 404.90683641527, 404.37034541526305, 405.67260217215875, 405.5109490861691, 404.2898135363692, 405.07073526391474, 405.58981257625425, 405.3751447994642, 405.36549518339785, 405.3332161707569, 404.88956759976287, 405.37027184803094, 404.8980725551248, 405.34774082392767, 405.2984093045488, 405.14372480308344, 405.19187487160525, 405.03757034167137, 405.16280927227615, 404.7829216539207, 405.03107640207867, 404.7292557576276, 404.8025372723253, 403.9177916263665, 404.7460239584375, 403.96821450150077, 404.1905949169899, 404.1704924951662, 403.16496315846314, 402.2360298379118, 403.3863719919289, 402.9762332238292, 403.15699339382735, 403.4020052256797, 402.3032561236677, 402.8453577277423, 401.11356268338346, 401.3504783424065, 400.94087925309395, 400.29569405733, 400.0328154316862, 399.5130445431503, 398.66148407548866, 399.83880015591535, 398.96289406538807, 398.42261837089694, 391.76612693948175]*36\n\n\t#sequence is: scincalibrations are calibrations estimated at first by firing electrons \n\t#containment is containment tower by tower\n\t#scincorrections are the corrections to apply to get correct calibration constant to reconstruct energy deposited (s vector)\n\t#s_cont is s vector corrected by containment to reconstrct primary electron energy\n\t\n\tscincalibrations = [0.1]+s_cont\n\tif len(Barrel_VectorSignals) != len(scincalibrations):\n\t\tprint \"wrong calibration length s\"+str(len(Barrel_VectorSignals))+\" \"+str(len(scincalibrations))\n\t\tquit()\n\t\n\tcalib_Barrel_VectorSignals = [Barrel_VectorSignals[counter]*(1/(entry)) for counter, entry in enumerate(scincalibrations)]\n\treturn calib_Barrel_VectorSignals",
"def calibrating(self):\n self.current = State.CALIBRATING",
"def full_reset_and_calibrate(odrv0):\r\n\todrv0.erase_configuration()\r\n\tprint(\"Erased [1/7]\")\r\n\ttry: # Reboot causes loss of connection, use try to supress errors\r\n\t\todrv0.reboot()\r\n\texcept:\r\n\t\tpass\r\n\tprint(\"Rebooted [2/7]\")\r\n\todrv0 = odrive.find_any() # Reconnect to the Odrive\r\n\tprint(\"Connected [3/7]\")\r\n\todrv0.axis0.motor.config.pre_calibrated = True # Set all the flags required for pre calibration\r\n\todrv0.axis0.encoder.config.pre_calibrated = True\r\n\todrv0.axis0.encoder.config.use_index = True\r\n\todrv0.axis0.config.startup_encoder_index_search = True # Change startup sequence\r\n\todrv0.axis0.config.startup_closed_loop_control = True\r\n\todrv0.axis0.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE # Calibrate\r\n\tprint(\"Started calibration 1 [4/7]\", end=\"\")\r\n\twhile odrv0.axis0.current_state != AXIS_STATE_IDLE: # Wait for calibration to be done\r\n\t\ttime.sleep(0.1)\r\n\t\tprint(\".\", end=\"\")\r\n\todrv0.save_configuration()\r\n\r\n\tprint(\"\\nCalibration 1 complete [5/7]\")\r\n\tprint(\"now will begin calibration sequence for second axis\")\r\n\ttime.sleep(3)\r\n\todrv0.axis1.motor.config.pre_calibrated = True # Set all the flags required for pre calibration\r\n\todrv0.axis1.encoder.config.pre_calibrated = True\r\n\todrv0.axis1.encoder.config.use_index = True\r\n\todrv0.axis1.config.startup_encoder_index_search = True # Change startup sequence\r\n\todrv0.axis1.config.startup_closed_loop_control = True\r\n\todrv0.axis1.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE # Calibrate\r\n\tprint(\"Started calibration 2 [6/7]\", end=\"\")\r\n\twhile odrv0.axis1.current_state != AXIS_STATE_IDLE: # Wait for calibration to be done\r\n\t\ttime.sleep(0.5)\r\n\t\tprint(\".\", end=\"\")\r\n\r\n\tprint(\"\\nCalibration 2 complete [7/7]\")\r\n\r\n\t#closed loop control for both axis\r\n\todrv0.save_configuration()\r\n\todrv0.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\r\n\todrv0.axis1.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\r\n\r\n\treturn odrv0",
"def bsc(x,delta):",
"def calibcher(Barrel_VectorSignalsCher):\n\t#chercalibrations = [106.37048257816659, 106.13146567275112, 105.86590903712592, 105.81634772611199, 105.70865247970764, 105.64190045805486, 105.6639555914862, 105.5283776283045, 105.65723353008754, 105.7715133071762, 105.5658607357618, 105.61417315615921, 105.70536383822864, 105.77758372693368, 105.77043752701637, 105.5506909020155, 105.70623524436972, 105.65209417795693, 105.73892093109146, 105.61237691382905, 105.61860853169827, 105.63069081304641, 105.63120977003204, 105.72883741949272, 105.64351076067801, 105.51191943482567, 105.75579206781183, 105.6255099492349, 105.52676483799216, 105.50729849902562, 105.49691196235294, 105.51232976499537, 105.49294739086051, 105.46743366546217, 105.39944256662419, 105.43093327017503, 105.42218126102728, 105.33353578857226, 105.26156500953066, 105.19128846086149, 105.31605046745744, 105.35904116180494, 105.31714780361818, 105.34284763793717, 105.31700630345172, 105.32123783068911, 105.3888770522753, 105.37422214386339, 105.52604928728498, 105.46278011845477, 105.28372058763158, 105.4206155499903, 105.44174663935398, 105.56551159861326, 105.43779145304754, 105.54146571740893, 105.55442493693165, 105.67687272835073, 105.64965481223994, 105.65043713995333, 105.39546198539618, 105.5315247334327, 105.46869729570527, 105.45532655577286, 105.45938285691861, 105.49087149606235, 105.44596875109438, 105.52868954952874, 105.53867506889421, 105.69754494826158, 105.86874369528533, 105.70806060408836, 105.80369193129457, 106.1513324795608, 106.22235486676682]*36\n\t#chercorrections = [103.89550278429044, 103.70085240913916, 103.47143663620487, 103.40757852713733, 103.32416049481843, 103.2595513611303, 103.2771449523405, 103.14870022496955, 103.26744048386126, 103.40758019116339, 103.21664103253485, 103.25622797189547, 103.34575627566997, 103.44938982761327, 103.45740559874831, 103.25964142067909, 103.36696644454749, 103.34908426908248, 103.46274313918914, 103.34676995410369, 103.38553010427495, 103.41998384923339, 103.4223873054996, 103.54772829312716, 103.47303521689206, 103.38099295102664, 103.64838254840568, 103.52882284094234, 103.47351928457604, 103.45766349134928, 103.4805975522724, 103.53959817237843, 103.54212246362432, 103.52461109374697, 103.49938164982743, 103.56077151211231, 103.57780504091401, 103.53601233216357, 103.49962719536036, 103.47251867627905, 103.57132307678117, 103.59517284094395, 103.49923289942933, 103.51277318075437, 103.43104568143158, 103.40570108524098, 103.44047928171713, 103.39003361389342, 103.5289699793597, 103.41222913634279, 103.2043558306036, 103.30902132105307, 103.29948932213777, 103.37716306057568, 103.24111809153652, 103.31134211867192, 103.25381251814082, 103.35638287866391, 103.34984997105141, 103.24469534151174, 102.97376718686942, 103.06332233060792, 102.9776969458658, 102.9126043760532, 102.87127991738039, 102.82959630902663, 102.72936950682644, 102.74096868887399, 102.625968873716, 102.70935760664186, 102.73558010993771, 102.45477830507473, 102.33168129960764, 102.49598661880496, 102.19939271116233]*36\n\tc_cont = [103.08779161895677, 102.91302749597065, 102.69865952763615, 102.61869191270468, 102.54928716539662, 102.48068194031679, 102.49984890080964, 102.35556540203991, 102.47969263317724, 102.6281510005559, 102.43322742473204, 102.47810836409134, 102.55371034296142, 102.67118096060427, 102.67297232291142, 102.48284061965019, 102.5649981010228, 102.56155933915096, 102.67809243921879, 102.56067521092992, 102.60224889784466, 102.63726587197354, 102.63191774143888, 
102.76496337880408, 102.6929637252195, 102.60491403169074, 102.85913301772406, 102.741217657914, 102.69546934772463, 102.67035622618218, 102.69304228926421, 102.75886941001674, 102.75976221892324, 102.731492956408, 102.7188845221274, 102.77429845330465, 102.78649420797491, 102.75140309520445, 102.70051794706535, 102.68996042906552, 102.78365100098196, 102.8153738834064, 102.71292597825087, 102.73146416207084, 102.6450394621172, 102.61404003462839, 102.66675609739092, 102.60991640602225, 102.750246685674, 102.62575682868824, 102.42720794074478, 102.51305416968992, 102.52098979376447, 102.59751750679058, 102.45780037787654, 102.53083482963227, 102.47068539942974, 102.5721049950492, 102.56599170316093, 102.46469174495641, 102.19238017547394, 102.28148980648412, 102.19817435184497, 102.1330715125064, 102.09230341456059, 102.05765775486448, 101.9644426420847, 101.96014956820567, 101.85273676485993, 101.93311307596035, 101.96637882465569, 101.68716060542853, 101.55050000833062, 101.67603040894112, 99.77195006099979]*36\n\n\tchercalibrations = [0.1]+c_cont\n\tif len(Barrel_VectorSignalsCher) != len(chercalibrations):\n\t\tprint \"wrong calibration length \\n\"\n\t\tquit()\n\n\tcalib_Barrel_VectorSignalsCher = [Barrel_VectorSignalsCher[counter]*(1/(entry)) for counter, entry in enumerate(chercalibrations)]\n\treturn calib_Barrel_VectorSignalsCher",
"def calibrate_submodel(self):\n \n model = self.model \n cell = self.knowledge_base.cell\n nucleus = model.compartments.get_one(id='n')\n mitochondrion = model.compartments.get_one(id='m')\n cytoplasm = model.compartments.get_one(id='c')\n\n beta = self.options.get('beta')\n\n Avogadro = self.model.parameters.get_or_create(\n id='Avogadro',\n type=None,\n value=scipy.constants.Avogadro,\n units=unit_registry.parse_units('molecule mol^-1')) \n\n rnas_kb = cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType)\n undetermined_model_kcat = []\n determined_kcat = []\n for rna_kb, reaction in zip(rnas_kb, self.submodel.reactions):\n\n init_species_counts = {}\n \n modifier_species = self._degradation_modifier[reaction.name] \n init_species_counts[modifier_species.gen_id()] = modifier_species.distribution_init_concentration.mean\n \n rna_kb_compartment_id = rna_kb.species[0].compartment.id\n if rna_kb_compartment_id == 'c':\n rna_compartment = cytoplasm\n degradation_compartment = cytoplasm\n else:\n rna_compartment = mitochondrion\n degradation_compartment = mitochondrion \n\n rna_reactant = model.species_types.get_one(id=rna_kb.id).species.get_one(compartment=rna_compartment)\n\n half_life = rna_kb.properties.get_one(property='half-life').get_value()\n mean_concentration = rna_reactant.distribution_init_concentration.mean\n\n average_rate = utils.calc_avg_deg_rate(mean_concentration, half_life)\n \n for species in reaction.get_reactants():\n\n init_species_counts[species.gen_id()] = species.distribution_init_concentration.mean\n\n if model.parameters.get(id='K_m_{}_{}'.format(reaction.id, species.species_type.id)):\n model_Km = model.parameters.get_one(\n id='K_m_{}_{}'.format(reaction.id, species.species_type.id))\n if species.distribution_init_concentration.mean:\n model_Km.value = beta * species.distribution_init_concentration.mean \\\n / Avogadro.value / species.compartment.init_volume.mean\n model_Km.comments = 'The value was assumed to be {} times the concentration of {} in {}'.format(\n beta, species.species_type.id, species.compartment.name)\n else:\n model_Km.value = 1e-05\n model_Km.comments = 'The value was assigned to 1e-05 because the concentration of ' +\\\n '{} in {} was zero'.format(species.species_type.id, species.compartment.name)\n\n model_kcat = model.parameters.get_one(id='k_cat_{}'.format(reaction.id))\n\n if average_rate: \n model_kcat.value = 1.\n eval_rate_law = reaction.rate_laws[0].expression._parsed_expression.eval({\n wc_lang.Species: init_species_counts,\n wc_lang.Compartment: {\n rna_compartment.id: rna_compartment.init_volume.mean * \\\n rna_compartment.init_density.value,\n degradation_compartment.id: degradation_compartment.init_volume.mean * \\\n degradation_compartment.init_density.value}\n })\n if eval_rate_law:\n model_kcat.value = average_rate / eval_rate_law\n determined_kcat.append(model_kcat.value)\n else:\n undetermined_model_kcat.append(model_kcat) \n else: \n undetermined_model_kcat.append(model_kcat)\n \n median_kcat = numpy.median(determined_kcat)\n for model_kcat in undetermined_model_kcat:\n model_kcat.value = median_kcat\n model_kcat.comments = 'Set to the median value because it could not be determined from data' \n\n print('RNA degradation submodel has been generated')",
"def test_calibration_docstring(self):\n dtype = np.float64\n\n observed_prices = np.array(\n [[20.09689284, 10.91953054, 4.25012702, 1.11561839, 0.20815853],\n [3.34813209, 6.03578711, 10.2874194, 16.26824328, 23.73850935]],\n dtype=dtype)\n\n strikes = np.array(\n [[80.0, 90.0, 100.0, 110.0, 120.0], [80.0, 90.0, 100.0, 110.0, 120.0]],\n dtype=dtype)\n expiries = np.array([[0.5], [1.0]], dtype=dtype)\n forwards = 100.0\n is_call_options = np.array([[True], [False]])\n\n beta = np.array([0.5, 0.5], dtype=dtype)\n\n models, is_converged, _ = tff.models.sabr.calibration(\n prices=observed_prices,\n strikes=strikes,\n expiries=expiries,\n forwards=forwards,\n is_call_options=is_call_options,\n beta=beta,\n calibrate_beta=False,\n volvol=np.array([1.0, 1.0], dtype=dtype),\n volvol_lower_bound=0.0,\n volvol_upper_bound=10.0,\n rho=np.array([0.0, 0.0], dtype=dtype),\n rho_lower_bound=-0.75,\n rho_upper_bound=0.75,\n maximum_iterations=1000)\n\n [calibrated_alpha, calibrated_beta, calibrated_volvol, calibrated_rho,\n is_converged] = self.evaluate(\n [models.alpha, models.beta, models.volvol, models.rho, is_converged])\n\n with self.subTest('AllConverged'):\n self.assertTrue(all(is_converged))\n with self.subTest('AlphaRecovered'):\n self.assertAllClose(calibrated_alpha, [1.5, 2.5], atol=2e-3, rtol=2e-3)\n with self.subTest('BetaRecovered'):\n self.assertAllClose(calibrated_beta, [0.5, 0.5], atol=2e-3, rtol=2e-3)\n with self.subTest('VolVolRecovered'):\n self.assertAllClose(calibrated_volvol, [0.33, 0.66], atol=2e-2, rtol=5e-2)\n with self.subTest('RhoRecovered'):\n self.assertAllClose(calibrated_rho, [0.1, -0.1], atol=1e-2, rtol=5e-2)",
"def ads_self_calibrate(self, ads_num):\n\t\treturn self.config_ads(ads_num, 6, 0)",
"def refresh_calibrationImage(self):\n cal = self.constraints[self.calibrating_color]\n hn = cal['h_min']\n hx = cal['h_max']\n sn = cal['s_min']\n sx = cal['s_max']\n vn = cal['v_min']\n vx = cal['v_max']\n\n def fill(h, img):\n for y in range(0, 256):\n v = (vx - vn) * y / 256 + vn\n for x in range(0, 256):\n s = (sx - sn) * x / 256 + sn\n\n img[y, x, 0] = h\n img[y, x, 1] = s\n img[y, x, 2] = v\n\n fill(hn, self.swatchn)\n fill(hx, self.swatchx)\n self.swatchn = cv.cvtColor(self.swatchn, cv.COLOR_HSV2BGR)\n self.swatchx = cv.cvtColor(self.swatchx, cv.COLOR_HSV2BGR)\n hor = np.hstack((self.swatchn, self.swatchx))\n cv.imshow(\"cal\", hor)",
"def updateBsplines(self, refit=False):\n\n self.computeQindices()\n self.computeCindices()\n self.computeDindices()\n self.computeKnots()\n self.computeParameters()\n self.computeJacobian()\n if refit:\n self.computeControlPts()\n self.computePoints()",
"def adc_run_calibrate(self, c):\n raise Exception('Depricated. Use ADC Recalibrate instead')\n dev = self.selectedADC(c)\n info = c.setdefault(dev, {})\n filterFunc = info.get('filterFunc', np.array([255], dtype='<u1'))\n # Default to no stretch.\n filterStretchLen = info.get('filterStretchLen', 0)\n # Default to stretch at 0.\n filterStretchAt = info.get('filterStretchAt', 0)\n demods = (dict((i, info[i]) for i in\n range(dev.DEMOD_CHANNELS) if i in info))\n yield dev.runCalibrate()",
"def test_update_calibration(self):\n backend = FakeBackend()\n ref_old_value = 0.1\n ref_new_value = 0.3\n\n param = Parameter(\"to_calibrate\")\n schedule = ScheduleBlock(name=\"test\")\n schedule.append(Play(Constant(100, param), DriveChannel(0)), inplace=True)\n cals = Calibrations()\n cals.add_schedule(schedule, 0, 1)\n\n # Add init parameter to the cal table\n cals.add_parameter_value(\n value=ref_old_value,\n param=\"to_calibrate\",\n qubits=(0,),\n schedule=\"test\",\n )\n\n # Get old value\n old_value = cals.get_parameter_value(\"to_calibrate\", (0,), \"test\")\n\n exp = MockCalExperiment(\n physical_qubits=(0,),\n calibrations=cals,\n new_value=ref_new_value,\n param_name=\"to_calibrate\",\n sched_name=\"test\",\n )\n exp.run(backend).block_for_results()\n\n # Get new value\n new_value = cals.get_parameter_value(\"to_calibrate\", (0,), \"test\")\n self.assertNotEqual(old_value, new_value)\n\n # Validate calibrated schedule\n new_schedule = cals.get_schedule(\"test\", (0,))\n ref_schedule = schedule.assign_parameters({param: ref_new_value}, inplace=False)\n self.assertEqual(new_schedule, ref_schedule)",
"def _update_boost(self):\r\n\t\t\r\n\t\tfor i, active in enumerate(self.boutputs):\r\n\t\t\tif int(np.sum(active)) >= self.min_duty_cycle:\r\n\t\t\t\tself.boost[i] += self.boost_inc\r\n\t\t\telse:\r\n\t\t\t\tself.boost[i] = max(self.boost[i] - self.boost_dec, 0)",
"def ct_calibrate(photons, material, sinogram, scale, correct=True):\n\n\t# Get dimensions and work out detection for just air of twice the side\n\t# length (has to be the same as in ct_scan.py)\n\n\tn = sinogram.shape[1]\n\n\t# work out value of a sinogram point of air\n\tv = ct_detect(photons, material.coeff('Air'), 2*n*scale,1)[0]\n\t\n\t# construct sinogram of air\n\tsinogram_air = v * np.ones(sinogram.shape)\n\t\n\t# perform calibration\n\tsinogram = -np.log( np.divide(sinogram, sinogram_air))\n\n\n\treturn sinogram",
"def update_step():\n # for all states\n for x in range(0, mapper.MAX_CELLS_X):\n for y in range(0, mapper.MAX_CELLS_Y):\n for a in range(0, mapper.MAX_CELLS_A):\n if loc.bel_bar[x, y, a]>0.0001:\n loc.bel[x, y, a] = np.prod(loc.gaussian(loc.obs_range_data, mapper.obs_views[x, y, a, :], loc.sensor_sigma)) * loc.bel_bar[x, y, a]\n loc.bel = loc.bel / np.sum(loc.bel) # normalize belief grid",
"def _calibrate_dl0(self, event, telid):\n waveforms_r0 = event.r0.tel[telid].waveform\n _, selected_gain_channel = self.gain_selector(waveforms_r0)\n\n waveforms_r1 = event.r1.tel[telid].waveform\n if self._check_r1_empty(waveforms_r1):\n return\n\n # Use R0-selected gain channels to select R1 waveforms\n _, n_pixels, _ = waveforms_r1.shape\n waveforms_gs = waveforms_r1[selected_gain_channel, np.arange(n_pixels)]\n if selected_gain_channel is not None:\n event.r1.tel[telid].selected_gain_channel = selected_gain_channel\n else:\n if event.r1.tel[telid].selected_gain_channel is None:\n raise ValueError(\n \"EventSource is loading pre-gainselected waveforms \"\n \"without filling the selected_gain_channel container\"\n )\n\n reduced_waveforms = self.data_volume_reducer(waveforms_gs)\n event.dl0.tel[telid].waveform = reduced_waveforms",
"def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep().wait()\n\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range, speed_sp=MAX_SPEED, stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep().wait()\n\n self.arm_motor.position = 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
calibrate the bsc observations
|
def calibrate_BSC_data_v1p0(bsc_obs, site):
calib_path = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/' \
'Calibrations_for_LUMO_Ceilometers/'
filename = calib_path + site + '_window_trans_daily_cpro.pickle'
    # sort site name out (it is in CL31-A_BSC_KSS45W format, but needs to be CL31-A_KSS45W)
# load calibration data (using pickle)
with open(filename, 'rb') as handle:
window_trans_daily = pickle.load(handle)
for i, time_i in zip(np.arange(len(bsc_obs[site]['time'])), bsc_obs[site]['time']):
# find date in window_trans_daily
time_idx = np.where(np.array(window_trans_daily['dates']) == time_i.date())
# apply calibration to bsc data
bsc_obs[site]['backscatter'][i, :] *= window_trans_daily['c_pro'][time_idx]
return bsc_obs
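
A minimal usage sketch for calibrate_BSC_data_v1p0 follows; it is illustrative only. The bsc_obs layout (a 'time' list of datetimes and a 2D 'backscatter' array per site) is inferred from how the function indexes its inputs, the site string is hypothetical, and the call assumes the window-transmission pickle exists at the hard-coded calib_path above.

# Illustrative only: assumes the calibration pickle is present at the hard-coded
# calib_path, and that bsc_obs[site] holds 'time' (list of datetimes) and
# 'backscatter' (2D array, time x range gate), as the function expects.
import datetime as dt
import pickle      # required by calibrate_BSC_data_v1p0
import numpy as np  # required by calibrate_BSC_data_v1p0

site = 'CL31-A_BSC_KSS45W'  # hypothetical site key
times = [dt.datetime(2016, 5, 4) + dt.timedelta(seconds=15 * i) for i in range(5)]
bsc_obs = {site: {'time': times, 'backscatter': np.ones((len(times), 770))}}

bsc_obs = calibrate_BSC_data_v1p0(bsc_obs, site)
print(bsc_obs[site]['backscatter'].shape)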
|
[
"def __calibrate(self):\n my_name = '__calibrate'\n\n # check that transient members are present\n if not (hasattr(self, \"_Spectrum__wave_solution\")):\n self.__calibrationWaveSolution()\n\n # calibrate\n self.__wavelength = self.__wave_solution(np.arange(self.__spectrum.size))\n self.__calibrated = True",
"def calibrate_all(self):\n print(\"disconnect battery and press Enter\")\n input()\n self._set_all(self.motor_max)\n print(\"connect battery and press enter\")\n input()\n time.sleep(2)\n self._set_all(self.motor_min)\n time.sleep(8)\n print(\"calibrated\")",
"def calibscin(Barrel_VectorSignals):\n\t#scincalibrations = [415.46911408615017, 415.49032413530045, 414.2257295618793, 413.52149204021396, 411.5992702498399, 410.64900009885315, 410.05084897192495, 410.2933477391751, 411.3409180068102, 412.31065478145797, 410.66077419019877, 411.16318351136084, 411.44777420544165, 411.9325417788443, 411.37997153809016, 411.388755835253, 411.8810225437739, 411.58323678675737, 411.40360405115445, 411.4363536068476, 411.1434892175137, 411.0311309144043, 411.45278182281913, 411.295529214398, 410.66722584949844, 411.95872061830596, 411.5134821865462, 411.8234427877747, 411.0133769672026, 410.49154689961557, 411.8200149562403, 411.4650824673985, 410.2795836068218, 410.99784486503626, 411.3719137475675, 411.1496647172716, 411.10361391486657, 411.01421880697376, 410.597406149097, 410.8673343110665, 410.4613021375798, 410.9360845492963, 410.9528787632441, 410.8574880829973, 411.03315409936096, 410.9379153502092, 411.01901911573697, 410.80356948239154, 411.0444263559407, 410.84313941730574, 410.94072813786545, 410.204939014593, 410.9738410909085, 410.3391884191118, 410.6721258934903, 410.51534130232534, 409.62060911658847, 408.77108688926836, 409.99777980214225, 409.5992379372193, 409.83732253845403, 410.210413105926, 409.1679944501218, 409.7662734551919, 408.11740676906703, 408.3924355876955, 408.03209385093214, 407.6136434771539, 407.49508269173134, 407.10864535201216, 406.49601594991765, 407.97586047362637, 407.6622587467224, 407.70767503648653, 408.04075414921584]*36\t\n\t#scincorrections = [411.41483745459146, 411.52181636082577, 410.25098543903624, 409.76481640097643, 407.8648162057763, 407.0433073672767, 406.4280147748465, 406.6202788727341, 407.6662499733772, 408.6662149874733, 407.04700222023894, 407.5469707080265, 407.8482602981088, 408.34396984724685, 407.8356135111425, 407.9006104165864, 408.4005126486694, 408.08624713560107, 408.0027574234192, 408.03512549563607, 407.7646802314756, 407.66902911324036, 408.13219631572156, 408.0856386283693, 407.38021535979243, 408.76602535322274, 408.3354792012611, 408.7493505933448, 407.97452518970755, 407.4711792144778, 408.78371452969293, 408.59189054866147, 407.3678692910586, 408.1980036194848, 408.6716381355258, 408.4772494588298, 408.48623697063414, 408.4283387274703, 408.0399995983036, 408.459433126158, 408.0009678326388, 408.4220840256861, 408.401124384891, 408.22498572575756, 408.2946388759221, 408.16241045483764, 408.2162208231712, 407.86038369360386, 408.10072483636185, 407.8308977000335, 407.8739032108822, 407.05402921668883, 407.81948812803313, 407.0379965972104, 407.2807418021031, 407.2472061094793, 406.2461313455679, 405.3115718891361, 406.46924320089, 406.04385491758575, 406.23962673351735, 406.485581819831, 405.37184790415176, 405.9200835792759, 404.174107208703, 404.3861928079304, 403.94869689782337, 403.3611910496827, 403.06973154521165, 402.5554299506423, 401.6688569907378, 402.8571098242905, 402.03193211770736, 401.6356578529606, 401.29776188137134]*36\n\t#containment = [0.9922257350540894, 0.9924029080295286, 0.9925314933889849, 0.9923710947914168, 0.9925005601234895, 0.9924571682663081, 0.9924736876499678, 0.9923107627997272, 0.9923717693883665, 0.992462552656521, 0.9924100067589308, 0.9924641871673259, 0.9923359607471853, 0.9924773952914967, 0.9924178141593918, 0.9924772080326696, 0.9922415412669103, 0.992379952512389, 0.9924161038441175, 0.9923936205889852, 0.9924236863162547, 0.9924316563576252, 0.9923568814774528, 0.9924405399594359, 0.9924611132742223, 0.9924930212297, 0.9923853174427201, 
0.9923924066611056, 0.9924806854716944, 0.992390053683815, 0.9923893436872515, 0.9924596118186407, 0.9924440389467973, 0.9923388445611183, 0.9924589198963458, 0.9924056855957695, 0.9923602278246142, 0.9924218711993472, 0.9922791098871626, 0.9924370426348486, 0.9923948825562914, 0.9924726323036805, 0.9924027753718472, 0.9924520521026018, 0.9924006741483085, 0.9923441256883894, 0.9925201121485623, 0.9924546188776329, 0.9924782088159386, 0.9923947843091397, 0.9924698150227844, 0.9922952793358721, 0.9924636652757733, 0.9924582419298134, 0.9924127350793948, 0.9924450958333008, 0.9924155137751113, 0.992411906630523, 0.9924154871234933, 0.992445097600654, 0.9924117857125934, 0.9924140566552296, 0.9924301803483659, 0.9924252926230659, 0.992427658103939, 0.9924930313657724, 0.9925539612633278, 0.9924001191478654, 0.9924655317037001, 0.9924423192903769, 0.9925128053546892, 0.9925077413435955, 0.9923661833622193, 0.9920001139857957, 0.9762479738306956]\n\ts_cont = [408.21638950554075, 408.3954472740771, 407.1870232421094, 406.63875945884087, 404.8060585388971, 403.97304819147996, 403.3691105878475, 403.49367909804056, 404.55647780600043, 405.58591491094637, 403.9575182245898, 404.4757730162475, 404.72249522199195, 405.272159576985, 404.74332809708255, 404.83205898107536, 405.23195412471205, 404.9766105533868, 404.9085068798063, 404.9314555180952, 404.67532710488985, 404.58364980855805, 405.012793566413, 405.0007315500301, 404.30902206187204, 405.6974274788762, 405.2261341502687, 405.63975175649347, 404.90683641527, 404.37034541526305, 405.67260217215875, 405.5109490861691, 404.2898135363692, 405.07073526391474, 405.58981257625425, 405.3751447994642, 405.36549518339785, 405.3332161707569, 404.88956759976287, 405.37027184803094, 404.8980725551248, 405.34774082392767, 405.2984093045488, 405.14372480308344, 405.19187487160525, 405.03757034167137, 405.16280927227615, 404.7829216539207, 405.03107640207867, 404.7292557576276, 404.8025372723253, 403.9177916263665, 404.7460239584375, 403.96821450150077, 404.1905949169899, 404.1704924951662, 403.16496315846314, 402.2360298379118, 403.3863719919289, 402.9762332238292, 403.15699339382735, 403.4020052256797, 402.3032561236677, 402.8453577277423, 401.11356268338346, 401.3504783424065, 400.94087925309395, 400.29569405733, 400.0328154316862, 399.5130445431503, 398.66148407548866, 399.83880015591535, 398.96289406538807, 398.42261837089694, 391.76612693948175]*36\n\n\t#sequence is: scincalibrations are calibrations estimated at first by firing electrons \n\t#containment is containment tower by tower\n\t#scincorrections are the corrections to apply to get correct calibration constant to reconstruct energy deposited (s vector)\n\t#s_cont is s vector corrected by containment to reconstrct primary electron energy\n\t\n\tscincalibrations = [0.1]+s_cont\n\tif len(Barrel_VectorSignals) != len(scincalibrations):\n\t\tprint \"wrong calibration length s\"+str(len(Barrel_VectorSignals))+\" \"+str(len(scincalibrations))\n\t\tquit()\n\t\n\tcalib_Barrel_VectorSignals = [Barrel_VectorSignals[counter]*(1/(entry)) for counter, entry in enumerate(scincalibrations)]\n\treturn calib_Barrel_VectorSignals",
"def calibrating(self):\n self.current = State.CALIBRATING",
"def full_reset_and_calibrate(odrv0):\r\n\todrv0.erase_configuration()\r\n\tprint(\"Erased [1/7]\")\r\n\ttry: # Reboot causes loss of connection, use try to supress errors\r\n\t\todrv0.reboot()\r\n\texcept:\r\n\t\tpass\r\n\tprint(\"Rebooted [2/7]\")\r\n\todrv0 = odrive.find_any() # Reconnect to the Odrive\r\n\tprint(\"Connected [3/7]\")\r\n\todrv0.axis0.motor.config.pre_calibrated = True # Set all the flags required for pre calibration\r\n\todrv0.axis0.encoder.config.pre_calibrated = True\r\n\todrv0.axis0.encoder.config.use_index = True\r\n\todrv0.axis0.config.startup_encoder_index_search = True # Change startup sequence\r\n\todrv0.axis0.config.startup_closed_loop_control = True\r\n\todrv0.axis0.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE # Calibrate\r\n\tprint(\"Started calibration 1 [4/7]\", end=\"\")\r\n\twhile odrv0.axis0.current_state != AXIS_STATE_IDLE: # Wait for calibration to be done\r\n\t\ttime.sleep(0.1)\r\n\t\tprint(\".\", end=\"\")\r\n\todrv0.save_configuration()\r\n\r\n\tprint(\"\\nCalibration 1 complete [5/7]\")\r\n\tprint(\"now will begin calibration sequence for second axis\")\r\n\ttime.sleep(3)\r\n\todrv0.axis1.motor.config.pre_calibrated = True # Set all the flags required for pre calibration\r\n\todrv0.axis1.encoder.config.pre_calibrated = True\r\n\todrv0.axis1.encoder.config.use_index = True\r\n\todrv0.axis1.config.startup_encoder_index_search = True # Change startup sequence\r\n\todrv0.axis1.config.startup_closed_loop_control = True\r\n\todrv0.axis1.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE # Calibrate\r\n\tprint(\"Started calibration 2 [6/7]\", end=\"\")\r\n\twhile odrv0.axis1.current_state != AXIS_STATE_IDLE: # Wait for calibration to be done\r\n\t\ttime.sleep(0.5)\r\n\t\tprint(\".\", end=\"\")\r\n\r\n\tprint(\"\\nCalibration 2 complete [7/7]\")\r\n\r\n\t#closed loop control for both axis\r\n\todrv0.save_configuration()\r\n\todrv0.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\r\n\todrv0.axis1.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\r\n\r\n\treturn odrv0",
"def bsc(x,delta):",
"def calibcher(Barrel_VectorSignalsCher):\n\t#chercalibrations = [106.37048257816659, 106.13146567275112, 105.86590903712592, 105.81634772611199, 105.70865247970764, 105.64190045805486, 105.6639555914862, 105.5283776283045, 105.65723353008754, 105.7715133071762, 105.5658607357618, 105.61417315615921, 105.70536383822864, 105.77758372693368, 105.77043752701637, 105.5506909020155, 105.70623524436972, 105.65209417795693, 105.73892093109146, 105.61237691382905, 105.61860853169827, 105.63069081304641, 105.63120977003204, 105.72883741949272, 105.64351076067801, 105.51191943482567, 105.75579206781183, 105.6255099492349, 105.52676483799216, 105.50729849902562, 105.49691196235294, 105.51232976499537, 105.49294739086051, 105.46743366546217, 105.39944256662419, 105.43093327017503, 105.42218126102728, 105.33353578857226, 105.26156500953066, 105.19128846086149, 105.31605046745744, 105.35904116180494, 105.31714780361818, 105.34284763793717, 105.31700630345172, 105.32123783068911, 105.3888770522753, 105.37422214386339, 105.52604928728498, 105.46278011845477, 105.28372058763158, 105.4206155499903, 105.44174663935398, 105.56551159861326, 105.43779145304754, 105.54146571740893, 105.55442493693165, 105.67687272835073, 105.64965481223994, 105.65043713995333, 105.39546198539618, 105.5315247334327, 105.46869729570527, 105.45532655577286, 105.45938285691861, 105.49087149606235, 105.44596875109438, 105.52868954952874, 105.53867506889421, 105.69754494826158, 105.86874369528533, 105.70806060408836, 105.80369193129457, 106.1513324795608, 106.22235486676682]*36\n\t#chercorrections = [103.89550278429044, 103.70085240913916, 103.47143663620487, 103.40757852713733, 103.32416049481843, 103.2595513611303, 103.2771449523405, 103.14870022496955, 103.26744048386126, 103.40758019116339, 103.21664103253485, 103.25622797189547, 103.34575627566997, 103.44938982761327, 103.45740559874831, 103.25964142067909, 103.36696644454749, 103.34908426908248, 103.46274313918914, 103.34676995410369, 103.38553010427495, 103.41998384923339, 103.4223873054996, 103.54772829312716, 103.47303521689206, 103.38099295102664, 103.64838254840568, 103.52882284094234, 103.47351928457604, 103.45766349134928, 103.4805975522724, 103.53959817237843, 103.54212246362432, 103.52461109374697, 103.49938164982743, 103.56077151211231, 103.57780504091401, 103.53601233216357, 103.49962719536036, 103.47251867627905, 103.57132307678117, 103.59517284094395, 103.49923289942933, 103.51277318075437, 103.43104568143158, 103.40570108524098, 103.44047928171713, 103.39003361389342, 103.5289699793597, 103.41222913634279, 103.2043558306036, 103.30902132105307, 103.29948932213777, 103.37716306057568, 103.24111809153652, 103.31134211867192, 103.25381251814082, 103.35638287866391, 103.34984997105141, 103.24469534151174, 102.97376718686942, 103.06332233060792, 102.9776969458658, 102.9126043760532, 102.87127991738039, 102.82959630902663, 102.72936950682644, 102.74096868887399, 102.625968873716, 102.70935760664186, 102.73558010993771, 102.45477830507473, 102.33168129960764, 102.49598661880496, 102.19939271116233]*36\n\tc_cont = [103.08779161895677, 102.91302749597065, 102.69865952763615, 102.61869191270468, 102.54928716539662, 102.48068194031679, 102.49984890080964, 102.35556540203991, 102.47969263317724, 102.6281510005559, 102.43322742473204, 102.47810836409134, 102.55371034296142, 102.67118096060427, 102.67297232291142, 102.48284061965019, 102.5649981010228, 102.56155933915096, 102.67809243921879, 102.56067521092992, 102.60224889784466, 102.63726587197354, 102.63191774143888, 
102.76496337880408, 102.6929637252195, 102.60491403169074, 102.85913301772406, 102.741217657914, 102.69546934772463, 102.67035622618218, 102.69304228926421, 102.75886941001674, 102.75976221892324, 102.731492956408, 102.7188845221274, 102.77429845330465, 102.78649420797491, 102.75140309520445, 102.70051794706535, 102.68996042906552, 102.78365100098196, 102.8153738834064, 102.71292597825087, 102.73146416207084, 102.6450394621172, 102.61404003462839, 102.66675609739092, 102.60991640602225, 102.750246685674, 102.62575682868824, 102.42720794074478, 102.51305416968992, 102.52098979376447, 102.59751750679058, 102.45780037787654, 102.53083482963227, 102.47068539942974, 102.5721049950492, 102.56599170316093, 102.46469174495641, 102.19238017547394, 102.28148980648412, 102.19817435184497, 102.1330715125064, 102.09230341456059, 102.05765775486448, 101.9644426420847, 101.96014956820567, 101.85273676485993, 101.93311307596035, 101.96637882465569, 101.68716060542853, 101.55050000833062, 101.67603040894112, 99.77195006099979]*36\n\n\tchercalibrations = [0.1]+c_cont\n\tif len(Barrel_VectorSignalsCher) != len(chercalibrations):\n\t\tprint \"wrong calibration length \\n\"\n\t\tquit()\n\n\tcalib_Barrel_VectorSignalsCher = [Barrel_VectorSignalsCher[counter]*(1/(entry)) for counter, entry in enumerate(chercalibrations)]\n\treturn calib_Barrel_VectorSignalsCher",
"def calibrate_submodel(self):\n \n model = self.model \n cell = self.knowledge_base.cell\n nucleus = model.compartments.get_one(id='n')\n mitochondrion = model.compartments.get_one(id='m')\n cytoplasm = model.compartments.get_one(id='c')\n\n beta = self.options.get('beta')\n\n Avogadro = self.model.parameters.get_or_create(\n id='Avogadro',\n type=None,\n value=scipy.constants.Avogadro,\n units=unit_registry.parse_units('molecule mol^-1')) \n\n rnas_kb = cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType)\n undetermined_model_kcat = []\n determined_kcat = []\n for rna_kb, reaction in zip(rnas_kb, self.submodel.reactions):\n\n init_species_counts = {}\n \n modifier_species = self._degradation_modifier[reaction.name] \n init_species_counts[modifier_species.gen_id()] = modifier_species.distribution_init_concentration.mean\n \n rna_kb_compartment_id = rna_kb.species[0].compartment.id\n if rna_kb_compartment_id == 'c':\n rna_compartment = cytoplasm\n degradation_compartment = cytoplasm\n else:\n rna_compartment = mitochondrion\n degradation_compartment = mitochondrion \n\n rna_reactant = model.species_types.get_one(id=rna_kb.id).species.get_one(compartment=rna_compartment)\n\n half_life = rna_kb.properties.get_one(property='half-life').get_value()\n mean_concentration = rna_reactant.distribution_init_concentration.mean\n\n average_rate = utils.calc_avg_deg_rate(mean_concentration, half_life)\n \n for species in reaction.get_reactants():\n\n init_species_counts[species.gen_id()] = species.distribution_init_concentration.mean\n\n if model.parameters.get(id='K_m_{}_{}'.format(reaction.id, species.species_type.id)):\n model_Km = model.parameters.get_one(\n id='K_m_{}_{}'.format(reaction.id, species.species_type.id))\n if species.distribution_init_concentration.mean:\n model_Km.value = beta * species.distribution_init_concentration.mean \\\n / Avogadro.value / species.compartment.init_volume.mean\n model_Km.comments = 'The value was assumed to be {} times the concentration of {} in {}'.format(\n beta, species.species_type.id, species.compartment.name)\n else:\n model_Km.value = 1e-05\n model_Km.comments = 'The value was assigned to 1e-05 because the concentration of ' +\\\n '{} in {} was zero'.format(species.species_type.id, species.compartment.name)\n\n model_kcat = model.parameters.get_one(id='k_cat_{}'.format(reaction.id))\n\n if average_rate: \n model_kcat.value = 1.\n eval_rate_law = reaction.rate_laws[0].expression._parsed_expression.eval({\n wc_lang.Species: init_species_counts,\n wc_lang.Compartment: {\n rna_compartment.id: rna_compartment.init_volume.mean * \\\n rna_compartment.init_density.value,\n degradation_compartment.id: degradation_compartment.init_volume.mean * \\\n degradation_compartment.init_density.value}\n })\n if eval_rate_law:\n model_kcat.value = average_rate / eval_rate_law\n determined_kcat.append(model_kcat.value)\n else:\n undetermined_model_kcat.append(model_kcat) \n else: \n undetermined_model_kcat.append(model_kcat)\n \n median_kcat = numpy.median(determined_kcat)\n for model_kcat in undetermined_model_kcat:\n model_kcat.value = median_kcat\n model_kcat.comments = 'Set to the median value because it could not be determined from data' \n\n print('RNA degradation submodel has been generated')",
"def test_calibration_docstring(self):\n dtype = np.float64\n\n observed_prices = np.array(\n [[20.09689284, 10.91953054, 4.25012702, 1.11561839, 0.20815853],\n [3.34813209, 6.03578711, 10.2874194, 16.26824328, 23.73850935]],\n dtype=dtype)\n\n strikes = np.array(\n [[80.0, 90.0, 100.0, 110.0, 120.0], [80.0, 90.0, 100.0, 110.0, 120.0]],\n dtype=dtype)\n expiries = np.array([[0.5], [1.0]], dtype=dtype)\n forwards = 100.0\n is_call_options = np.array([[True], [False]])\n\n beta = np.array([0.5, 0.5], dtype=dtype)\n\n models, is_converged, _ = tff.models.sabr.calibration(\n prices=observed_prices,\n strikes=strikes,\n expiries=expiries,\n forwards=forwards,\n is_call_options=is_call_options,\n beta=beta,\n calibrate_beta=False,\n volvol=np.array([1.0, 1.0], dtype=dtype),\n volvol_lower_bound=0.0,\n volvol_upper_bound=10.0,\n rho=np.array([0.0, 0.0], dtype=dtype),\n rho_lower_bound=-0.75,\n rho_upper_bound=0.75,\n maximum_iterations=1000)\n\n [calibrated_alpha, calibrated_beta, calibrated_volvol, calibrated_rho,\n is_converged] = self.evaluate(\n [models.alpha, models.beta, models.volvol, models.rho, is_converged])\n\n with self.subTest('AllConverged'):\n self.assertTrue(all(is_converged))\n with self.subTest('AlphaRecovered'):\n self.assertAllClose(calibrated_alpha, [1.5, 2.5], atol=2e-3, rtol=2e-3)\n with self.subTest('BetaRecovered'):\n self.assertAllClose(calibrated_beta, [0.5, 0.5], atol=2e-3, rtol=2e-3)\n with self.subTest('VolVolRecovered'):\n self.assertAllClose(calibrated_volvol, [0.33, 0.66], atol=2e-2, rtol=5e-2)\n with self.subTest('RhoRecovered'):\n self.assertAllClose(calibrated_rho, [0.1, -0.1], atol=1e-2, rtol=5e-2)",
"def ads_self_calibrate(self, ads_num):\n\t\treturn self.config_ads(ads_num, 6, 0)",
"def refresh_calibrationImage(self):\n cal = self.constraints[self.calibrating_color]\n hn = cal['h_min']\n hx = cal['h_max']\n sn = cal['s_min']\n sx = cal['s_max']\n vn = cal['v_min']\n vx = cal['v_max']\n\n def fill(h, img):\n for y in range(0, 256):\n v = (vx - vn) * y / 256 + vn\n for x in range(0, 256):\n s = (sx - sn) * x / 256 + sn\n\n img[y, x, 0] = h\n img[y, x, 1] = s\n img[y, x, 2] = v\n\n fill(hn, self.swatchn)\n fill(hx, self.swatchx)\n self.swatchn = cv.cvtColor(self.swatchn, cv.COLOR_HSV2BGR)\n self.swatchx = cv.cvtColor(self.swatchx, cv.COLOR_HSV2BGR)\n hor = np.hstack((self.swatchn, self.swatchx))\n cv.imshow(\"cal\", hor)",
"def updateBsplines(self, refit=False):\n\n self.computeQindices()\n self.computeCindices()\n self.computeDindices()\n self.computeKnots()\n self.computeParameters()\n self.computeJacobian()\n if refit:\n self.computeControlPts()\n self.computePoints()",
"def adc_run_calibrate(self, c):\n raise Exception('Depricated. Use ADC Recalibrate instead')\n dev = self.selectedADC(c)\n info = c.setdefault(dev, {})\n filterFunc = info.get('filterFunc', np.array([255], dtype='<u1'))\n # Default to no stretch.\n filterStretchLen = info.get('filterStretchLen', 0)\n # Default to stretch at 0.\n filterStretchAt = info.get('filterStretchAt', 0)\n demods = (dict((i, info[i]) for i in\n range(dev.DEMOD_CHANNELS) if i in info))\n yield dev.runCalibrate()",
"def test_update_calibration(self):\n backend = FakeBackend()\n ref_old_value = 0.1\n ref_new_value = 0.3\n\n param = Parameter(\"to_calibrate\")\n schedule = ScheduleBlock(name=\"test\")\n schedule.append(Play(Constant(100, param), DriveChannel(0)), inplace=True)\n cals = Calibrations()\n cals.add_schedule(schedule, 0, 1)\n\n # Add init parameter to the cal table\n cals.add_parameter_value(\n value=ref_old_value,\n param=\"to_calibrate\",\n qubits=(0,),\n schedule=\"test\",\n )\n\n # Get old value\n old_value = cals.get_parameter_value(\"to_calibrate\", (0,), \"test\")\n\n exp = MockCalExperiment(\n physical_qubits=(0,),\n calibrations=cals,\n new_value=ref_new_value,\n param_name=\"to_calibrate\",\n sched_name=\"test\",\n )\n exp.run(backend).block_for_results()\n\n # Get new value\n new_value = cals.get_parameter_value(\"to_calibrate\", (0,), \"test\")\n self.assertNotEqual(old_value, new_value)\n\n # Validate calibrated schedule\n new_schedule = cals.get_schedule(\"test\", (0,))\n ref_schedule = schedule.assign_parameters({param: ref_new_value}, inplace=False)\n self.assertEqual(new_schedule, ref_schedule)",
"def _update_boost(self):\r\n\t\t\r\n\t\tfor i, active in enumerate(self.boutputs):\r\n\t\t\tif int(np.sum(active)) >= self.min_duty_cycle:\r\n\t\t\t\tself.boost[i] += self.boost_inc\r\n\t\t\telse:\r\n\t\t\t\tself.boost[i] = max(self.boost[i] - self.boost_dec, 0)",
"def ct_calibrate(photons, material, sinogram, scale, correct=True):\n\n\t# Get dimensions and work out detection for just air of twice the side\n\t# length (has to be the same as in ct_scan.py)\n\n\tn = sinogram.shape[1]\n\n\t# work out value of a sinogram point of air\n\tv = ct_detect(photons, material.coeff('Air'), 2*n*scale,1)[0]\n\t\n\t# construct sinogram of air\n\tsinogram_air = v * np.ones(sinogram.shape)\n\t\n\t# perform calibration\n\tsinogram = -np.log( np.divide(sinogram, sinogram_air))\n\n\n\treturn sinogram",
"def update_step():\n # for all states\n for x in range(0, mapper.MAX_CELLS_X):\n for y in range(0, mapper.MAX_CELLS_Y):\n for a in range(0, mapper.MAX_CELLS_A):\n if loc.bel_bar[x, y, a]>0.0001:\n loc.bel[x, y, a] = np.prod(loc.gaussian(loc.obs_range_data, mapper.obs_views[x, y, a, :], loc.sensor_sigma)) * loc.bel_bar[x, y, a]\n loc.bel = loc.bel / np.sum(loc.bel) # normalize belief grid",
"def _calibrate_dl0(self, event, telid):\n waveforms_r0 = event.r0.tel[telid].waveform\n _, selected_gain_channel = self.gain_selector(waveforms_r0)\n\n waveforms_r1 = event.r1.tel[telid].waveform\n if self._check_r1_empty(waveforms_r1):\n return\n\n # Use R0-selected gain channels to select R1 waveforms\n _, n_pixels, _ = waveforms_r1.shape\n waveforms_gs = waveforms_r1[selected_gain_channel, np.arange(n_pixels)]\n if selected_gain_channel is not None:\n event.r1.tel[telid].selected_gain_channel = selected_gain_channel\n else:\n if event.r1.tel[telid].selected_gain_channel is None:\n raise ValueError(\n \"EventSource is loading pre-gainselected waveforms \"\n \"without filling the selected_gain_channel container\"\n )\n\n reduced_waveforms = self.data_volume_reducer(waveforms_gs)\n event.dl0.tel[telid].waveform = reduced_waveforms",
"def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep().wait()\n\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range, speed_sp=MAX_SPEED, stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep().wait()\n\n self.arm_motor.position = 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates filename from site string and day
|
def create_filename(ceilDatadir, site, day, fType):
# site id (short) and site str in filename
split = site.split('_')
site_id = split[-1]
bsc_site_name = split[0] + '_' + fType + '_' + split[-1]
# date for the main day
doyStr = day.strftime('%Y%j')
# time resolution of data in filename
if fType == 'MLH':
timestr = '15min'
elif fType == 'BSC':
timestr = '15sec'
elif fType == 'CLD':
        timestr = '15sec'
elif fType == '':
raise ValueError('fType variable not given!')
else:
raise ValueError('fType argument is not recognised. Please choose MLH, BSC, CLD or add new fType')
# get filename
bsc_fname = ceilDatadir + bsc_site_name + '_' + doyStr + '_' + timestr + '.nc'
return bsc_fname, site_id
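
A quick, hypothetical call to create_filename; the data directory is invented, the site string follows the CL31-A_BSC_KSS45W convention the function splits on, and the expected outputs are shown in the comments.

import datetime as dt

# Hypothetical inputs; only the site-string format matters to the function.
bsc_fname, site_id = create_filename('C:/data/L1/', 'CL31-A_BSC_KSS45W',
                                     dt.datetime(2016, 5, 4), 'BSC')
print(bsc_fname)  # C:/data/L1/CL31-A_BSC_KSS45W_2016125_15sec.nc
print(site_id)    # KSS45W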
|
[
"def file_name(self):\n return datetime.strftime(self.creation_date, '%Y-%m-%d-') + self.slug + '.html'",
"def generate_file_name():\n import datetime\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n filename = \"game saved at {}\".format(now)\n return filename",
"def make_name(linkurl, topic):\n fileRegex = re.compile(r'^(.*)/(20\\d\\d)/(\\d\\d)/(\\d\\d)/(.*)$')\n mo = fileRegex.search(linkurl)\n date_part = mo.group(2)+'-'+mo.group(3)+'-'+mo.group(4)\n gen_name = date_part+'-'+topic+'-'+mo.group(5)+'.html'\n filename = os.path.join(POSTSDIR, gen_name)\n return filename",
"def __date_to_filename (date):\n return str (date.month) + '_' + str (date.year) + '.dat'",
"def gen_filename() -> str:\n return str(datetime.timestamp(datetime.now())).replace(\".\", \"\")",
"def _make_filename(url):\r\n # This is super naive.\r\n # Todo: Make filename when the crawler return per site\r\n # Todo: Make random filename if needed\r\n filename = url.split(\"/\")[-1]\r\n log.debug(\"Making filename: %s -> %s\", url, filename)\r\n return filename",
"def date_filename(date):\n\n return r'{}.csv'.format('{:%y_%m_%d}'.format(date))",
"def generate_filename_from_meta(path):\n _, extension = os.path.splitext(path)\n return get_file_tags(path)['created_date'].replace(':', '').replace(' ', '-') + extension.lower()",
"def get_file_name(self):\n data_file_name= os.path.join(self.data_path, \"{0}_to_{1}_{2}\".format(\n self.mars_dict['date'].split('/')[0],\n self.mars_dict['date'].split('/')[-1],\n self.mars_dict['levtype']))\n return data_file_name",
"def gdas1_fname_from_date(time):\n\n months = {\n 1: 'jan', 2: 'feb', 3: 'mar', 4: 'apr', 5: 'may', 6: 'jun',\n 7: 'jul', 8: 'aug', 9: 'sep', 10: 'oct', 11: 'nov', 12: 'dec'\n }\n week_no = ((time.day - 1) // 7) + 1\n\n # determine the current 7 days\n currentDate = dt.datetime.now()\n currentWeekNo = ((currentDate.day - 1) // 7) + 1\n currentday_start = (currentWeekNo - 1) * 7 + 1\n currentDate_weekstart = dt.datetime(\n currentDate.year,\n currentDate.month,\n currentday_start\n )\n if (time >= currentDate_weekstart) and (time <= currentDate):\n gdas1File = 'current7days'\n elif (time > currentDate):\n logger.info('GDAS1 file for input date is not ready yet.')\n raise FileNotFoundError\n elif (time < currentDate_weekstart):\n gdas1File = 'gdas1.{}{}.w{}'.format(\n months[time.month],\n time.strftime('%y'),\n week_no\n )\n\n return gdas1File",
"def get_filename(self):\n\n return \"-\".join([\n str(self.paper.module.code),\n str(self.paper.year_start),\n str(self.paper.year_stop),\n str(self.paper.sitting),\n PaperPDF.period_map[self.paper.period]]\n ) + \".pdf\"",
"def log_filename(domain, now=None):\n if not now:\n now = datetime.utcnow()\n return \"{}-{}.log\".format(now.strftime(\"%Y-%m-%dT%X\"), domain)",
"def filename(self):\n return (env_config.CONFIG.device_info_snapshot_file_format %\n str(self.date))",
"def get_date_path():\n date = dt.datetime.now()\n year = date.year\n month = stringTIME(date.month) # Do some string formatting\n day = stringTIME(date.day) # More string formatting\n path = '/year_{}/month_{}/day_{}/'.format(year, month, day)\n return path",
"def create_filename(self, title):\n slug = slugify(title)\n if slug in self.slugs:\n slug = slug + '_'\n if len(slug) > 100:\n slug = slug[0:100]\n self.slugs.append(slug)\n return slug + '.html'",
"def gen_filename(year, month):\n\n month = str(month).zfill(2)\n year = str(year)\n\n return \"hre\" + year + month + \".nc\"",
"def _safe_filename(filename):\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d-%H%M%S\")\n basename, extension = os.path.splitext(os.path.basename(filename))\n return \"{0}-{1}.{2}\".format(basename, date, extension)",
"def filename_from_yearmonth(ym, stn_id):\n y4 = \"%04d\" % int(ym[0])\n y2 = y4[2:]\n m2 = \"%02d\" % int(ym[1])\n stn_id = str(stn_id).lower()\n return \"%s%s%s.wea\" % (stn_id, m2, y2)",
"def filename_with_timestamp(base_name):\n return base_name + FileUtils.timestamp_string() + \".jpg\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
deletes an organizer by name
|
def delete(self, name):
organizer = OrganizerModel.find_organizer_by_name(name)
if organizer:
organizer.delete_from_db()
return {"message": "Organizer deleted"}, 204
return {"message": "Organizer not found"}, 404
|
[
"def delete(self, name: str) -> None:\n sub_line = self._get_from(name)\n for _, department, clerk in sub_line:\n clerk.delete(department)",
"def remove_agent(self, *, agent_name: str) -> None:",
"def deleteAffordancesFromViewer(self, Viewer, obstacleName=\"\"):\n affs = self.getAffordanceTypes()\n if obstacleName == \"\":\n for aff in affs:\n self.deleteNode(aff, True, Viewer)\n else:\n import re\n\n for aff in affs:\n refs = self.getAffRefObstacles(aff)\n count = 0\n while count < len(refs):\n if refs[count] == obstacleName:\n toDelete = aff + \"-\" + refs[count]\n nodes = Viewer.client.gui.getGroupNodeList(aff)\n for node in nodes:\n splt = re.split(r\"\\.\", node)\n if splt[0] == toDelete:\n self.deleteNode(node, True, Viewer)\n count += 1\n return",
"def delName(self, value):\n value = valueToInt(value)\n if value < 0 or value > len(self._nameList) - 1:\n self.log.warning('invalid value: {0}'.format(value))\n return\n self._nameList.pop(value)",
"def deleteAffordancesByTypeFromViewer(\n self, affordanceType, Viewer, obstacleName=\"\"\n ):\n if obstacleName == \"\":\n Viewer.client.gui.deleteNode(affordanceType, True)\n else:\n import re\n\n affs = self.getAffordanceTypes()\n for aff in affs:\n if aff == affordanceType:\n refs = self.getAffRefObstacles(aff)\n count = 0\n while count < len(refs):\n if refs[count] == obstacleName:\n toDelete = aff + \"-\" + refs[count]\n nodes = Viewer.client.gui.getNodeList()\n for node in nodes:\n splt = re.split(r\"\\.\", node)\n if splt[0] == toDelete:\n self.deleteNode(node, True, Viewer)\n count += 1\n return",
"def organizer_pre_delete(sender, instance, **kwargs):\n for permission_code in Competition.get_organizer_permissions():\n remove_perm(permission_code, instance.user, instance.competition)",
"def delete(self, name):\n return self.sendCommand(Command(b'DELETE', _prepareMailboxName(name)))",
"def test_organization_id_delete(self):\n pass",
"def do_remove(self, arg):\n for investigator in pool.investigators:\n if arg == str(investigator):\n if investigator in selected:\n selected.remove(investigator)\n print('%s has been deselected.' % arg)\n print()\n self._print_selected()\n else:\n print('%s was not selected.' % arg)\n return\n\n print('Unknown investigator: select an investigator to remove (double TAB to autocomplete).')",
"def delete(self, name):\n item = [i for i in _fake_db.get('items') if i.get('name') == name]\n if not item :\n return '', 301\n \n _fake_db['items'].remove(item[0])\n return '', 204",
"def delete_personinfo(personinfo):\n personinfo.delete_personinfo()",
"def delete(self):\n for repo in self.get_repositories():\n repo.delete()\n self.gitea.requests_delete(Organization.API_OBJECT.format(name=self.username))\n self.deleted = True",
"def handle_actor_delete_request(name):\n name = name.replace(\"_\", \" \")\n if name in ACTORS:\n del ACTORS[name]\n return make_response(jsonify(\"Deleted Successfully\"), 201)\n else:\n return make_response(jsonify(\"Actor not in database.\"), 400)",
"def test_organizer_unorganized_unorganized(organizer_unorganized: Organizer):\n unorganized = organizer_unorganized.unorganized\n assert len(unorganized) == 2",
"def delete(self, aggregator_id):\n le_aggregator = get_a_aggregator(aggregator_id)\n if not le_aggregator:\n return {'success': False, 'msg': 'aggregator does not exist'}\n else:\n db.session.delete(le_aggregator)\n db.session.commit()\n return {'success': True, 'message': 'officer deleted successfully'}",
"def delete_Amenity(amenity_id):\n del_obj = storage.get(Amenity, amenity_id)\n if del_obj is not None:\n storage.delete(del_obj)\n storage.save()\n return jsonify({})\n else:\n abort(404)",
"def delete_field(self, name: str) -> None:\n self._post_field(\"delete-field\", name=name)",
"def test_owner_delete(self):\n self.test_owner_add()\n data = {'user': 'tester'}\n user = self.organization.owners.get(username='tester')\n owner = OrganizationOwner.objects.get(\n organization=self.organization,\n owner=user,\n )\n resp = self.client.post(\n '/organizations/mozilla/owners/{}/delete/'.format(owner.pk),\n data=data,\n )\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(\n resp['location'],\n '/organizations/mozilla/owners/',\n )\n self.assertEqual(self.organization.owners.count(), 1)",
"def test_view_can_delete_organization(self):\n res = self.client().post('/api/organizations/', data=self.org_data)\n self.assertEqual(res.status_code, 201)\n results = json.loads(res.data.decode())\n response = self.client().delete(\n '/api/organizations/{}'.format(results['id']))\n self.assertEqual(response.status_code, 202)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get all unique combinations of two lists in Python
|
def unique_comb_of_two_lists(A, B):
res = []
for p in permutations(A, len(B)):
zipped = zip(p, B)
res.append(list(zipped))
return res
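
A short check of what unique_comb_of_two_lists returns (the snippet relies on itertools.permutations, whose import is not shown). Note that it pairs ordered selections of A position-by-position with B, rather than producing a plain Cartesian product.

from itertools import permutations  # needed by unique_comb_of_two_lists

A = [1, 2, 3]
B = ['x', 'y']
for combo in unique_comb_of_two_lists(A, B):
    print(combo)
# [(1, 'x'), (2, 'y')]
# [(1, 'x'), (3, 'y')]
# [(2, 'x'), (1, 'y')]
# [(2, 'x'), (3, 'y')]
# [(3, 'x'), (1, 'y')]
# [(3, 'x'), (2, 'y')]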
|
[
"def union(list1, list2):\n\n return list(set(list1) | set(list2))",
"def list_reunion(list1, list2):\n return list(set(list1) | set(list2))",
"def list_union(a, b):\n c = list(copy(a))\n for item in b:\n if item not in a:\n c.append(item)\n return c",
"def union(list_a, list_b):\n all_values = list_a.values()\n all_values.extend(list_b.values())\n return make_linked_list(set(all_values))",
"def pairs(list1, list2):\n combinations = [[i, j] for i in list1 for j in list2]\n # for i in list1:\n # for j in list2:\n # combinations.extend([[i,j]])\n\n return combinations",
"def cartesian_product(a,b):\n return [(x,y) for x in a for y in b ]",
"def permut2lists(a,b):\n solucao = []\n for i,j in itertools.product(a,b):\n solucao.append([i,j])\n return solucao",
"def get_cartesian_product(lists):\n return [i for i in itertools.product(*lists)]",
"def set_union(nodes1, nodes2):\n output = [n for n in nodes1]\n for node2 in nodes2:\n if not node_in_nodes(node2, nodes1):\n output.append(node2)\n return output",
"def match_cross(lsts):\n return list(map(list, zip(*itertools.product(*lsts))))",
"def union(llist_a, llist_b):\n union_set = LinkedList()\n for llist in (llist_a, llist_b):\n node = llist.head\n while node is not None:\n if node.value not in union_set.nodes:\n union_set.push(node.value)\n\n node = node.next\n\n return union_set",
"def match_cross2(lsts):\n return list(reversed(list(map(list, zip(*itertools.product(*reversed(lsts)))))))",
"def cartesian_product(a, b):\n return np.vstack([np.repeat(a, len(b)), np.tile(b, len(a))]).T",
"def cartesian(lst1, lst2):\r\n list3 = []\r\n for i in range(len(lst1)):\r\n for j in range(len(lst2)):\r\n list3.append([lst1[i],lst2[j]]) #add in a loop each component\r\n #within lst1 to each component in lst2\r\n return list3",
"def cartesian( v1, v2 ):\n return tuple([(x,y) for x in v1 for y in v2])",
"def cross(a,b):\r\n return [s + t for s in a for t in b]",
"def get_pieces_combinations(pieces):\n return set(list(itertools.permutations(pieces)))",
"def common_elements(set_1, set_2):\n unique_ele = set()\n ret_lst = []\n\n for ele in set_1:\n unique_ele.add(ele)\n for ele2 in set_2:\n if ele2 in unique_ele:\n ret_lst.append(ele2)\n return ret_lst",
"def ordered_union(a: Iterable, b: Iterable) -> OrderedSet:\n assert not isinstance(a, str) # treat string as atomic, not iterable\n assert not isinstance(b, str) # treat string as atomic, not iterable\n a = OrderedSet(a)\n for v in b:\n if v not in a:\n a.add(v)\n return a"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns an array of the captured image
|
def getImageFromCam(self):
with picamera.array.PiRGBArray(self.CAMERA) as output:
self.CAMERA.capture(output, 'rgb')
print('Captured %dx%d image' % (output.array.shape[1], output.array.shape[0]))
return output.array
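
A hedged sketch of using the capture method above; the wrapper class is invented, and it assumes a Raspberry Pi with the picamera package available (including picamera.array, which the method relies on).

# Hypothetical wrapper: assumes self.CAMERA is a picamera.PiCamera instance,
# as getImageFromCam above implies.
import picamera
import picamera.array

class Camera(object):
    def __init__(self):
        self.CAMERA = picamera.PiCamera()
    getImageFromCam = getImageFromCam  # bind the function above as a method

cam = Camera()
frame = cam.getImageFromCam()  # numpy array of shape (rows, cols, 3)
print(frame.shape)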
|
[
"def grab_one(self):\n\n camera_array = self._get_camera_array()\n\n size = camera_array.GetSize()\n\n result = []\n\n for i in range(size):\n grab_result = camera_array[i].GrabOne(self._TIME_OUT)\n image_array = self.post_processing(grab_result).GetArray()\n grab_result.Release()\n result.append(image_array)\n\n return result",
"def image_array(self):\n \n # Retrieve all of the hex digits in the list.\n # NOTE: ? digits are interpreted as having a value of 0.\n digits = self.digits()\n imgarray = [0 if digit == -1 else digit for digit in digits]\n \n # Each line in a bytes file contains 40 digits. The last line of the\n # file, however, may contain less than 40 digits. In order to create\n # a non-jagged 2D array, we need to reduce the number of pixels to the\n # largest multiple of 40.\n lines = len(imgarray) // 40\n imgarray = imgarray[:lines*40]\n \n # Reshape the array of pixels into a 2D array containing 40 columns\n # and a number of rows equivalent to the number of rows in the file\n # (potentially minus 1 row).\n imgarray = np.reshape(imgarray, (lines, 40)) \n \n # Turn the list into a numpy array.\n imgarray = np.array(imgarray)\n \n return imgarray",
"def testimg():\n return testimage().array()",
"def screen_shot_to_object(self) -> np.ndarray:\n pic_path = self.screen_shot()\n # temp file will be automatically removed after usage\n data = cv2.imread(pic_path, cv2.COLOR_RGB2GRAY)\n os.remove(pic_path)\n return data",
"def cam2numpy(self):\n nao_image = self.video.getImageRemote(self.subscriber)\n image_width = nao_image[0]\n image_height = nao_image[1]\n array = nao_image[6]\n frame = Image.frombytes(\"RGB\", (image_width, image_height), array)\n frame = np.array(frame)\n return frame",
"def main():\n \n with open(\"a1422866084343.jpg\",\"rb\") as fileh:\n \n s = fileh.read()\n \n image_file = StringIO(s)\n \n #print dir(image_file)\n \n image = Image.open(image_file)\n \n array = numpy.array(image) \n \n print type( array )",
"def _to_array(path_to_image):\n return im.imread(path_to_image) # Read image as is",
"def read(self):\n\t\t# This code is based on the picamera example at:\n\t\t# http://picamera.readthedocs.org/en/release-1.0/recipes1.html#capturing-to-an-opencv-object\n\t\t# Capture a frame from the camera.\n\t\tdata = io.BytesIO()\n\t\twith picamera.PiCamera() as camera:\n\t\t\tcamera.capture(data, format='jpeg')\n\t\tdata = np.fromstring(data.getvalue(), dtype=np.uint8)\n\t\t# Decode the image data and return an OpenCV image.\n\t\timage = cv2.imdecode(data, 1)\n\t\t# Save captured image for debugging.\n\t\tcv2.imwrite(DEBUG_IMAGE, image)\n\t\t\n\t\t# Return the captured image data.\n\t\treturn image",
"def array_from_img(image):\n return np.array(image)",
"def snapshot(camera):\n #lamps(GPIO.HIGH)\n #reference to camera capture\n with PiRGBArray(camera) as raw:\n #raw = PiRGBArray(camera) \n #get image from camera\n lamps(GPIO.HIGH)\n camera.capture(raw, format='bgr')\n lamps(GPIO.LOW)\n #print('Captured')\n imRef = raw.array\n \n return imRef",
"def get_images(self):\n return [env.render(mode='rgb_array') for env in self.list_env]",
"def get_camimage(self):\n return self._camimg",
"def get_images(self):\n pass",
"def get_result(self):\n arr = BytesIO()\n self.composition.save(arr, format=\"PNG\")\n arr.seek(0)\n return arr",
"def read_saved_screenshot_png_to_array(self, screen_id):\n if not isinstance(screen_id, baseinteger):\n raise TypeError(\"screen_id can only be an instance of type baseinteger\")\n (width, height, data) = self._call(\"readSavedScreenshotPNGToArray\",\n in_p=[screen_id])\n return (width, height, data)",
"def get_image(self):",
"def Captures(self) -> CaptureCollection:",
"def render(self):\n time_stamps = []\n cam_imgs = []\n cur_time = rospy.get_time()\n\n for recorder in self._cameras:\n stamp, image = recorder.get_image()\n print(\"stamp:\", stamp)\n logging.getLogger('robot_logger').error(\"Checking for time difference: Current time {} camera time {}\".format(cur_time, stamp))\n if abs(stamp - cur_time) > 10 * self._obs_tol: # no camera ping in half second => camera failure\n logging.getLogger('robot_logger').error(\"DeSYNC - no ping in more than {} seconds!\".format(10 * self._obs_tol))\n raise Image_Exception\n time_stamps.append(stamp)\n cam_imgs.append(image)\n\n if self.ncam > 1:\n for index, i in enumerate(time_stamps[:-1]):\n for j in time_stamps[index + 1:]:\n if abs(i - j) > self._obs_tol:\n logging.getLogger('robot_logger').error('DeSYNC- Cameras are out of sync!')\n raise Image_Exception\n\n images = np.zeros((self.ncam, self._height, self._width, 3), dtype=np.uint8)\n for c, img in enumerate(cam_imgs):\n images[c] = img[:, :, ::-1]\n\n return images",
"def read_saved_thumbnail_png_to_array(self, screen_id):\n if not isinstance(screen_id, baseinteger):\n raise TypeError(\"screen_id can only be an instance of type baseinteger\")\n (width, height, data) = self._call(\"readSavedThumbnailPNGToArray\",\n in_p=[screen_id])\n return (width, height, data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constructor for Book Class.
|
def __init__(self, author, title):
self.author = author
self.title = title
|
[
"def __init__(self, book_name, book_author, book_year=None, rates=[]):\n self.book_name = book_name\n self.book_author = book_author\n self.book_year = book_year\n self.owner = None\n self.__rates = rates",
"def __init__(self, bible, trad=False):\n\n self._bible = bible\n self._books = [0] * 27\n self._trad = trad\n self.loadBible()",
"def __init__(self, node=None, path=None):\n super(StylesBookDocument, self).__init__(node)\n self._path = path\n _stylesbook_element = self.getFirstChildNodeByTagName(const.TAG_STYLESBOOK)\n\n self._stylesbook_element = StylesBookElement(_stylesbook_element, self)\n\n if not _stylesbook_element:\n self.appendChild(self._stylesbook_element)\n\n self.setVersion(const.VERSION)",
"def __init__(self, section_title,articles_raw,date):\n self.section_title = section_title\n self.articles_raw = articles_raw\n self.date = date",
"def __init__(self,name,age,year):\r\n #Person.__init__(self,name, age)\r\n super().__init__(name,age)\r\n self.year = year",
"def __init__ ( self ) :\n\n None",
"def __init__(self, title, year):\n self.title = title\n self.year = year\n # id is a field that is required for rendering of the website later\n self.id = \"-\".join(title.split())",
"def create_book(book_data: tuple) -> Book:\n author = input(\"Enter author name: \")\n new_book = Book(book_data[0], book_data[1], book_data[2], author)\n return new_book",
"def __init__(self, book_code=None, created=None, changed_by=None, updated=None, id=None, crm_id=None, organization_id=None, coupon_book_definition_id=None, name=None, cost=None, remaining_coupons=None, max_remaining_coupons=None, maximum_coupon_uses=None, deleted=False):\n self.swagger_types = {\n 'book_code': 'str',\n 'created': 'datetime',\n 'changed_by': 'str',\n 'updated': 'datetime',\n 'id': 'str',\n 'crm_id': 'str',\n 'organization_id': 'str',\n 'coupon_book_definition_id': 'str',\n 'name': 'str',\n 'cost': 'float',\n 'remaining_coupons': 'int',\n 'max_remaining_coupons': 'int',\n 'maximum_coupon_uses': 'int',\n 'deleted': 'bool'\n }\n\n self.attribute_map = {\n 'book_code': 'bookCode',\n 'created': 'created',\n 'changed_by': 'changedBy',\n 'updated': 'updated',\n 'id': 'id',\n 'crm_id': 'crmID',\n 'organization_id': 'organizationID',\n 'coupon_book_definition_id': 'couponBookDefinitionID',\n 'name': 'name',\n 'cost': 'cost',\n 'remaining_coupons': 'remainingCoupons',\n 'max_remaining_coupons': 'maxRemainingCoupons',\n 'maximum_coupon_uses': 'maximumCouponUses',\n 'deleted': 'deleted'\n }\n\n self._book_code = book_code\n self._created = created\n self._changed_by = changed_by\n self._updated = updated\n self._id = id\n self._crm_id = crm_id\n self._organization_id = organization_id\n self._coupon_book_definition_id = coupon_book_definition_id\n self._name = name\n self._cost = cost\n self._remaining_coupons = remaining_coupons\n self._max_remaining_coupons = max_remaining_coupons\n self._maximum_coupon_uses = maximum_coupon_uses\n self._deleted = deleted",
"def __init__(self, filepath=None, filename=None, pages=[]):\r\n\r\n self.filepath = filepath\r\n self.inputPdf = PdfFileReader(self.filepath, \"rb\")\r\n self.docInfo = self.inputPdf.getDocumentInfo()\r\n self.author = self.docInfo.author\r\n self.title = self.docInfo.title\r\n self.filename = filename\r\n self.numPages = self.inputPdf.getNumPages()\r\n self.pages = pages # List of page objects\r",
"def __init__(self, title, developer, publisher):\r\n self.title = title\r\n self.developer = developer\r\n self.publisher = publisher\r\n self.esrb = \"rp\" # Default value\r",
"def __init__(self, book, repository_uri='http://aleph.gutenberg.org'):\n if not repository_uri:\n raise ValueError(\"Please set the URI of a 'local' gutenberg text repository.\")\n\n if \"http://www.gutenberg.org/files\" in repository_uri:\n raise ValueError(\n \"\"\"\n Please create a local repository. More information on:\n https://www.gutenberg.org/wiki/Gutenberg:Information_About_Robot_Access_to_our_Pages\n \"\"\"\n )\n\n self._temporary_dir = Path(mkdtemp(prefix=\"dhtk-\"))\n self._repository_uri = repository_uri\n self.book = book",
"def __init__(self):\n this = _coin.new_ScXMLDocument()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self,post_code, price_min, price_max, bedroom, bathroom):\n self.post_code = post_code\n self.price_min = price_min\n self.price_max = price_max\n self.bedroom = bedroom\n self.bathroom = bathroom",
"def __init__(self, first_name, last_name, sex, age):\n self.first_name = first_name\n self.last_name = last_name\n self.sex = sex\n self.age = age",
"def create(self, book):\n return super(BookRepository, self).create(book)",
"def __init__(self, title=None, length=None):\n self.title = self.Title\n if title is not None:\n self.title = title\n\n self.mlength = self.Length\n if length is not None:\n self.mlength = length\n\n self.name = self.title.lower()",
"def __init__(self, first_name, last_name, age, gender, location):\r\n \r\n self.first_name = first_name\r\n self.last_name = last_name\r\n self.age = age\r\n self.gender = gender\r\n self.location = location",
"def __init__(self, studentID, name):\n self.__studentID = studentID\n self.__name = name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints title and author.
|
def display(self):
bookinfo = '"{}, written by {}"'.format(self.title, self.author)
    print(bookinfo)
|
[
"def printInfo(self):\r\n\r\n about = \"Student name is {0}, {1}, and {2} is taking {3}.\".format(\r\n self.lastName, self.firstName, self.pronoun, len(self._courseList))\r\n\r\n print(about)",
"def print_infoheader():\n\tprint(\" _______.__ _______.\")\n\tprint(\"|_ _|__|.-----.--.--.| __|.----.----.-----.-----.-----.\")\n\tprint(\" | | | || | | ||__ || __| _| -__| -__| |\")\n\tprint(\" |___| |__||__|__|___ ||_______||____|__| |_____|_____|__|__|\")\n\tprint(\" |_____| © P.Bartels - https://www.kangafoo.de\\n\")",
"def print_header():\n\n print('------------------------------------')\n print(' CAT FACTORY')\n print('------------------------------------')",
"def print_header():\n print(\"STEM Center Temperature Project\")\n print(\"Shaotong Wen\")",
"def _write_title(self) -> None:\n self.doc.preamble.append(Command('title', self.json_summary[\"title\"]))\n self.doc.preamble.append(Command('author', f\"FastEstimator {fe.__version__}\"))\n self.doc.preamble.append(Command('date', NoEscape(r'\\today')))\n self.doc.append(NoEscape(r'\\maketitle'))",
"def print_authors(authors: dict[str, str]) -> None:\n print(rpipes.terminal.move_y(rpipes.terminal.height // 2 - len(authors) // 2), end=\"\")\n\n for author in authors:\n print(rpipes.terminal.move_right(2), end=\"\")\n print(\n rpipes.terminal.link(\n authors[author],\n rpipes.terminal.white_bold\n + author\n + rpipes.terminal.normal\n + \" - \"\n + authors[author],\n )\n ) # Not all terminals support links so it also prints the url next to the author\n\n print(\n rpipes.terminal.move(rpipes.terminal.height - 3, rpipes.terminal.width - 20)\n + f\"Press {rpipes.terminal.white_bold}B{rpipes.terminal.normal} to go back\"\n )\n draw_boundary()",
"def display_author(self):\n self.screen.blit(self.author, (0, 620))",
"def print_meta(self):\n self.logger.handle('Author: {}'.format(self.meta['author']), Logger.HEADER)\n self.logger.handle('Module name: {}, version {}'.format(self.meta['name'], self.meta['version']), Logger.HEADER)\n self.logger.handle('Description: {}'.format(self.meta['description']), Logger.HEADER)",
"def _print_metadata(title: str, value: str) -> None:\n print(f'{c.Fore.CYAN}{title!s:<22}: {c.Fore.RESET}{c.Style.BRIGHT}{value}')",
"def print_student_info(self):\n print(\"ID: %s\" % self.__ID)\n print(\"Name: %s, %s\" % (self.__last_name, self.__first_name))",
"def print_entries(self):\n print \"Entries for |-\" + self.title + \"-| (id: \" + self.id + \"): \"\n for entry in self.entries:\n print '- {} said {} on {}'.format(entry.author, entry.comment, entry.timestamp)",
"def author_name(self) -> str:",
"def print_torrent(self):\n print('Title: %s' % self.title)\n print('URL: %s' % self.url)\n print('Category: %s' % self.category)\n print('Sub-Category: %s' % self.sub_category)\n print('Magnet Link: %s' % self.magnet_link)\n print('Torrent Link: %s' % self.torrent_link)\n print('Uploaded: %s' % self.created)\n print('Comments: %d' % self.comments)\n print('Has Cover Image: %s' % self.has_cover)\n print('User Status: %s' % self.user_status)\n print('Size: %s' % self.size)\n print('User: %s' % self.user)\n print('Seeders: %d' % self.seeders)\n print('Leechers: %d' % self.leechers)",
"def test_author(title_page):\n assert title_page.title.one_line(), \"Author is provided\"",
"def title(self, text):\n bold_text = terminal.bold(text)\n message = \"\\n{text}\\n\".format(text=terminal.underline(bold_text))\n self.write(message)",
"def favorite_book(title):\n\tprint(\"One of my favorite books is \" + title.title())",
"def display_book(book_name):\n\tprint(\"One of my favourite books is \" + book_name + \".\")",
"def print_header():\r\n\tprint('\\n')\r\n\tprint('======================================================================')\r\n\tprint('######## ## ## ######## ## ####### ## ## #### ######## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ##### ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ####### ## ######## ####### ## ## #### ## ')\r\n\tprint('======================================================================')\r\n\tprint('\\n')",
"def print_desc(self):\n print(self.description)\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
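Read together, the two Book records above (the constructor and this display method) describe one small class. A minimal sketch assembling the two snippets, assuming Python 3 where print is a function; the sample author and title are placeholders:

class Book:
    def __init__(self, author, title):
        self.author = author
        self.title = title

    def display(self):
        bookinfo = '"{}, written by {}"'.format(self.title, self.author)
        print(bookinfo)

Book("Jane Austen", "Persuasion").display()  # "Persuasion, written by Jane Austen"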
Toggle archiving the channel after the fact.
|
async def tempChannelsArchive(self, ctx: Context):
guildConfig = self.config.guild(ctx.guild)
archiving = await guildConfig.get_attr(KEY_ARCHIVE)()
if archiving:
archiving = False
self.logger.info(
"%s (%s) DISABLED archiving the temp channel for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":negative_squared_cross_mark: TempChannel: Archiving disabled. "
" The channel will be deleted after its lifetime expires."
)
else:
archiving = True
self.logger.info(
"%s (%s) ENABLED archiving the temp channel for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel: Archiving enabled. The channel "
"will have ALL user permissions revoked after its lifetime "
"expires, and will be renamed with the date and time that it "
"was archived."
)
await guildConfig.get_attr(KEY_ARCHIVE).set(archiving)
|
[
"def archive(self, channel_name):\n # Might not need to do this since we now do this in `stale`\n if self.ignore_channel(channel_name):\n self.logger.debug(\"Not archiving #%s because it's in ignore_channels\", channel_name)\n return\n\n if self.config.activated or self.config.archiver_activated:\n self.logger.debug(\"Announcing channel closure in #%s\", channel_name)\n self.post_marked_up_message(channel_name, self.closure_text, message_type='channel_archive')\n\n members = self.slacker.get_channel_member_names(channel_name)\n say = \"Members at archiving are {}\".format(\", \".join(sorted(members)))\n self.logger.debug(\"Telling channel #%s: %s\", channel_name, say)\n self.post_marked_up_message(channel_name, say, message_type='channel_archive_members')\n\n self.action(\"Archiving channel #{}\".format(channel_name))\n payload = self.slacker.archive(channel_name)\n if payload['ok']:\n self.logger.debug(\"Slack API response to archive: %s\", json.dumps(payload, indent=4))\n self.action(\"Archived {}\".format(channel_name))\n else:\n error = payload.get('error', '!! No error found in payload %s !!' % payload)\n self.logger.error(\"Failed to archive %s: %s. See https://api.slack.com/methods/channels.archive for more context.\", channel_name, error)\n\n return payload\n else:\n self.action(\"[DRY RUN] Archived {}\".format(channel_name))",
"def archive(self):\n if self.status == 'delivered' and not self.is_archived:\n self.message = sha1(self.message.encode('utf-8')).hexdigest()\n self.is_archived = True\n self.save()\n else:\n return False",
"def archive(self, **kwargs):\n\t\tself.__transact.is_archive = not self.__transact.is_archive\n\t\treturn self.__save()",
"def safe_archive(self, channel_name):\n self.logger.debug(\"Evaluating #%s for archival\", channel_name)\n\n # Might not need to do this since we now do this in `stale`\n if self.slacker.channel_has_only_restricted_members(channel_name):\n self.logger.debug(\"Would have archived #%s but it contains only restricted users\", channel_name)\n return\n\n today = date.today()\n if today >= self.earliest_archive_date:\n self.archive(channel_name)\n else:\n self.logger.debug(\"Would have archived #%s but it's not yet %s\", channel_name, self.earliest_archive_date)",
"def to_be_archived(row):\n return row[\"Priority\"] == \"Done\"",
"def archived(self):\n return self.is_archived",
"def action_archive(self, message):\n message.archive()",
"async def abandon(self, ctx):\n\n async with self.bot.db.execute(\"SELECT category_id FROM categories WHERE setting = ? AND guild_id = ?\",\n [\"mapset_archive\", int(ctx.guild.id)]) as cursor:\n guild_archive_category_id = await cursor.fetchone()\n if not guild_archive_category_id:\n await ctx.send(\"no archive category set for this server\")\n return\n\n async with self.bot.db.execute(\"SELECT user_id FROM mapset_channels WHERE user_id = ? AND channel_id = ?\",\n [int(ctx.author.id), int(ctx.channel.id)]) as cursor:\n mapset_owner_check = await cursor.fetchone()\n\n async with self.bot.db.execute(\"SELECT mapset_id FROM mapset_channels WHERE channel_id = ?\",\n [int(ctx.channel.id)]) as cursor:\n is_mapset_channel = await cursor.fetchone()\n\n if not is_mapset_channel:\n await ctx.send(\"This is not a mapset channel\")\n return\n\n if not (mapset_owner_check or await permissions.is_admin(ctx)):\n await ctx.send(f\"{ctx.author.mention} this is not your mapset channel\")\n return\n\n await self.bot.db.execute(\"DELETE FROM mod_tracking WHERE channel_id = ?\", [int(ctx.channel.id)])\n await self.bot.db.execute(\"DELETE FROM mod_post_history WHERE channel_id = ?\", [int(ctx.channel.id)])\n await self.bot.db.commit()\n await ctx.send(\"untracked everything in this channel\")\n\n archive_category = ctx.guild.get_channel(int(guild_archive_category_id[0]))\n if not archive_category:\n await ctx.reply(\"i am unable to locate the archive category. it was probably deleted.\")\n return\n\n try:\n await ctx.channel.edit(reason=\"mapset abandoned\", category=archive_category)\n await ctx.send(\"moved to archive\")\n except (discord.Forbidden, discord.HTTPException) as e:\n await ctx.send(embed=await exceptions.embed_exception(e))",
"async def announcechannel(self, ctx, channel: discord.TextChannel):\n self.settings.set_announce_channel(ctx.guild.id, channel.id)\n await ctx.tick()",
"def should_be_archived(self):\n if self.status in [constants.COLLECTING, constants.ARCHIVED]:\n return False\n # TODO this is not efficient, is there a better way?\n for mr in self.moderation_requests.all():\n if not mr.is_approved():\n return False\n return True",
"def download_on():\n mw.note_download_action.setEnabled(True)\n mw.side_download_action.setEnabled(True)\n mw.manual_download_action.setEnabled(True)",
"def archive(self):\n assert self.confirmed and self.finalized or not self.finalizable\n\n self.archived = True\n self.active = False\n\n session = object_session(self)\n\n def future_periods():\n p = session.query(Period)\n p = p.order_by(desc(Period.execution_start))\n p = p.with_entities(Period.id)\n\n for period in p:\n if period.id == self.id:\n break\n yield period.id\n\n # get the activities which have an occasion in a future period\n f = session.query(Occasion)\n f = f.with_entities(Occasion.activity_id)\n f = f.filter(Occasion.period_id.in_(tuple(future_periods())))\n\n # get the activities which have an occasion in the given period but\n # no occasion in any future period\n o = session.query(Occasion)\n o = o.filter(Occasion.period_id == self.id)\n o = o.filter(not_(Occasion.activity_id.in_(f.subquery())))\n o = o.options(joinedload(Occasion.activity))\n\n # archive those\n for occasion in o:\n if occasion.activity.state == 'accepted':\n occasion.activity.archive()\n\n # also archive all activities without an occasion\n w = session.query(Occasion)\n w = w.with_entities(distinct(Occasion.activity_id))\n\n # XXX circular import\n from onegov.activity.models.activity import Activity\n\n a = session.query(Activity)\n a = a.filter(not_(Activity.id.in_(w.subquery())))\n a = a.filter(Activity.state == 'accepted')\n\n for activity in a:\n activity.archive()",
"def unarchive():",
"def archive_cards(self):\n if not self.trello_board_archive:\n print(\"No archive board specified in config\")\n return\n\n td = datetime.timedelta(seconds=DAYS_TO_KEEP*24*60*60)\n completed_cards = self.list_trello_cards([ABORTED,COMPLETED])\n archived = False\n for card in completed_cards.values():\n date = self.description_to_dict(card.description).get(\"Date\")\n if not date:\n continue\n try:\n started = datetime.datetime.strptime(date[0],\"%y%m%d\")\n except ValueError:\n continue\n if datetime.datetime.utcnow() - started > td:\n archive_list_name = started.strftime(\"%b %Y\")\n print(\"Archiving card {} to list {}, run started on {}\".format(card.name,archive_list_name,date[0]))\n card.fetch()\n self.trello.change_list(card,archive_list_name,board_id=self.trello_board_archive.id)\n archived = True\n # Sort the lists on the board and then the cards in the list\n if archived:\n self.trello.sort_lists_on_board(self.trello_board_archive, key=self._chronologically)\n for lst in self.trello_board_archive.all_lists():\n self.trello.sort_cards_on_list(lst)",
"def toggle_to_export(self) -> bool:\n assert \"extract\" in self.current_queue\n\n # Get the currently playing extract\n cur_song = self.current_track()\n filepath = cur_song['abs_fp']\n\n if filepath:\n extract: ExtractFile = (session\n .query(ExtractFile)\n .filter_by(filepath=filepath)\n .one_or_none())\n\n if extract:\n if extract.to_export:\n extract.to_export = False\n session.commit()\n espeak(\"Export false\")\n else:\n extract.to_export = True\n session.commit()\n espeak(\"Export true\")\n\n logger.info(f\"{extract} to_export field was \"\n f\"set to {extract.to_export}\")\n return True\n else:\n logger.error(\"Currently playing extract not found in DB.\")\n else:\n logger.error(\"No currently playing track.\")\n return False",
"def AddArchiveBuild(self, mode='dev', show_url=True,\n extra_archive_paths=None):\n cmd = [self._python, self._v8archive_tool, '--target', self._target]\n self.AddTestStep(shell.ShellCommand, 'Archiving', cmd,\n workdir='build/v8')",
"def archive(self):\n archive_key_parts = ['archive', self.version, self.filename]\n self.pipeline.copy_key(self.key_path, _make_key_path(archive_key_parts))",
"def is_archive_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"is_archive_enabled\")",
"def test_archive04(self):\n test_subject = StateArchiver(os.path.join(self.tmp_dir, 'does-not-exist'))\n test_subject.purge()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Toggle the creation/deletion of the temporary channel.
|
async def tempChannelsToggle(self, ctx: Context):
guildConfig = self.config.guild(ctx.guild)
enabled = await guildConfig.get_attr(KEY_ENABLED)()
if enabled:
enabled = False
self.logger.info(
"%s (%s) DISABLED the temp channel for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(":negative_squared_cross_mark: TempChannel: Disabled.")
else:
enabled = True
self.logger.info(
"%s (%s) ENABLED the temp channel for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(":white_check_mark: TempChannel: Enabled.")
await guildConfig.get_attr(KEY_ENABLED).set(enabled)
|
[
"async def tempChannelsDelete(self, ctx: Context):\n guildConfig = self.config.guild(ctx.guild)\n channelId = await guildConfig.get_attr(KEY_CH_ID)()\n channelCreated = await guildConfig.get_attr(KEY_CH_CREATED)()\n\n if channelCreated and channelId:\n # Channel created, see when we should delete it.\n try:\n chanObj = self.bot.get_channel(channelId)\n await chanObj.delete()\n except discord.DiscordException:\n self.logger.error(\"Could not delete channel!\", exc_info=True)\n await ctx.send(\n \":warning: TempChannel: Something went wrong \"\n \"while trying to delete the channel. Please \"\n \"check the console log for details.\"\n )\n else:\n await guildConfig.get_attr(KEY_CH_ID).set(None)\n await guildConfig.get_attr(KEY_CH_CREATED).set(False)\n self.logger.info(\n \"%s (%s) deleted the temp channel #%s (%s) in %s (%s).\",\n ctx.author.name,\n ctx.author.id,\n chanObj.name,\n chanObj.id,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\":white_check_mark: TempChannel: Channel deleted\")\n else:\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel: There is no \"\n \"temporary channel to delete!\"\n )",
"async def newtemp(self, ctx, *, name):\n server = ctx.message.server\n perms = ctx.message.server.get_member(\n self.bot.user.id).server_permissions\n\n cname = str(name)\n\n if server.id not in self.settings:\n self.initial_config(server.id)\n\n if perms.manage_channels is False:\n await self.bot.say('I do not have permission to do that')\n elif self.settings[server.id]['toggleactive'] is False:\n await self.bot.say('This command is currently turned off.')\n else:\n channel = await self.bot.create_channel(\n server, cname, type=discord.ChannelType.voice)\n if self.settings[server.id]['toggleowner'] is True:\n overwrite = discord.PermissionOverwrite()\n overwrite.manage_channels = True\n overwrite.manage_roles = True\n await self.bot.edit_channel_permissions(\n channel, ctx.message.author, overwrite)\n self.settings[server.id]['channels'].append(channel.id)\n self.save_json()",
"async def tempChannelsArchive(self, ctx: Context):\n guildConfig = self.config.guild(ctx.guild)\n archiving = await guildConfig.get_attr(KEY_ARCHIVE)()\n if archiving:\n archiving = False\n self.logger.info(\n \"%s (%s) DISABLED archiving the temp channel for %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel: Archiving disabled. \"\n \" The channel will be deleted after its lifetime expires.\"\n )\n else:\n archiving = True\n self.logger.info(\n \"%s (%s) ENABLED archiving the temp channel for %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":white_check_mark: TempChannel: Archiving enabled. The channel \"\n \"will have ALL user permissions revoked after its lifetime \"\n \"expires, and will be renamed with the date and time that it \"\n \"was archived.\"\n )\n await guildConfig.get_attr(KEY_ARCHIVE).set(archiving)",
"def test__Channel__delete__2():\n guild_id = 202211090005\n channel_id = 202211090006\n \n guild = Guild.precreate(guild_id)\n \n channel = Channel.precreate(channel_id, channel_type = ChannelType.guild_category, guild_id = guild_id)\n guild.channels[channel_id] = channel\n \n channel._delete(None)\n \n vampytest.assert_not_in(channel_id, guild.channels)",
"def test__Channel__delete__1():\n client_id = 202211090003\n channel_id = 202211090004\n \n client = Client(\n token = 'token_20221209_0001',\n client_id = client_id,\n )\n \n try:\n channel = Channel.precreate(channel_id, channel_type = ChannelType.private_group)\n client.group_channels[channel_id] = channel\n \n channel._delete(client)\n \n vampytest.assert_not_in(channel_id, client.group_channels)\n \n # Cleanup\n finally:\n client._delete()\n client = None",
"async def channeldelete(self, ctx):\n status = await self.bot.pool.fetch(\"SELECT * FROM loggingsettings WHERE guildid = $1\", ctx.guild.id)\n\n if status[0][\"channel_delete\"] == True:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET channel_delete = $1 WHERE guildid = $2\", False, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned off for channels being deleted.\")\n await ctx.send(embed=embed)\n return\n else:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET channel_delete = $1 WHERE guildid = $2\", True, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned on for channels being deleted.\")\n await ctx.send(embed=embed)",
"def toggle_remove( self, event ):\r\n\r\n\t\tself.remove_temp_files = not self.remove_temp_files\r\n\t\tself.settings_menu.Check( 201, self.remove_temp_files )\r\n\t\tself.save_settings( )",
"async def checkChannels(self): # pylint: disable=too-many-branches,too-many-statements\n while self == self.bot.get_cog(\"TempChannels\"):\n await asyncio.sleep(SLEEP_TIME)\n # Create/maintain the channel during a valid time and duration, else\n # delete it.\n for guild in self.bot.guilds:\n async with self.config.guild(guild).all() as guildData:\n try:\n if not guildData[KEY_ENABLED]:\n continue\n\n if (\n int(time.strftime(\"%H\")) == guildData[KEY_START_HOUR]\n and int(time.strftime(\"%M\")) == guildData[KEY_START_MIN]\n and not guildData[KEY_CH_CREATED]\n and not guildData[KEY_CH_ID]\n ):\n # See if ALL of the following is satisfied.\n # - It is the starting time.\n # - The channel creation flag is not set.\n # - The channel ID doesn't exist.\n #\n # If it is satisfied, let's create a channel, and then\n # store the following in the settings:\n # - Channel ID.\n # - Time to delete channel.\n # Start with permissions\n\n # Always allow the bot to read.\n permsDict = {self.bot.user: PERMS_READ_Y}\n\n if guildData[KEY_ROLE_ALLOW]:\n # If we have allow roles, automatically deny @everyone the \"Read\n # Messages\" permission.\n permsDict[guild.default_role] = PERMS_READ_N\n for roleId in guildData[KEY_ROLE_ALLOW]:\n role = discord.utils.get(guild.roles, id=roleId)\n self.logger.debug(\"Allowed role %s\", role)\n if role:\n permsDict[role] = deepcopy(PERMS_READ_Y)\n\n # Check for deny permissions.\n if guildData[KEY_ROLE_DENY]:\n for roleId in guildData[KEY_ROLE_DENY]:\n role = discord.utils.get(guild.roles, id=roleId)\n self.logger.debug(\"Denied role %s\", role)\n if role and role not in permsDict.keys():\n self.logger.debug(\"Role not in dict, adding\")\n permsDict[role] = deepcopy(PERMS_SEND_N)\n elif role:\n self.logger.debug(\"Updating role\")\n permsDict[role].update(send_messages=False)\n\n self.logger.debug(\"Current permission overrides: \\n%s\", permsDict)\n\n # Grab parent category. 
If not set, this will return None anyways.\n category = None\n if guildData[KEY_CH_CATEGORY]:\n category = discord.utils.get(\n guild.channels, id=guildData[KEY_CH_CATEGORY]\n )\n\n chanObj = await guild.create_text_channel(\n guildData[KEY_CH_NAME],\n overwrites=permsDict,\n category=category,\n position=guildData[KEY_CH_POS],\n topic=guildData[KEY_CH_TOPIC],\n nsfw=guildData[KEY_NSFW],\n )\n self.logger.info(\n \"Channel #%s (%s) in %s (%s) was created.\",\n chanObj.name,\n chanObj.id,\n guild.name,\n guild.id,\n )\n guildData[KEY_CH_ID] = chanObj.id\n\n # Set delete times, and save settings.\n duration = (\n guildData[KEY_DURATION_HOURS] * 60 * 60\n + guildData[KEY_DURATION_MINS] * 60\n )\n guildData[KEY_STOP_TIME] = time.time() + duration\n guildData[KEY_CH_CREATED] = True\n\n elif guildData[KEY_CH_CREATED]:\n # Channel created, see when we should delete it.\n if time.time() >= guildData[KEY_STOP_TIME]:\n self.logger.debug(\n \"Past channel stop time, clearing ID \" \"and created keys.\"\n )\n chanObj = guild.get_channel(guildData[KEY_CH_ID])\n guildData[KEY_CH_ID] = None\n guildData[KEY_CH_CREATED] = False\n\n if chanObj and guildData[KEY_ARCHIVE]:\n await chanObj.set_permissions(\n guild.default_role, overwrite=PERMS_READ_N\n )\n for role in guild.roles:\n if role == guild.default_role:\n continue\n await chanObj.set_permissions(\n role, overwrite=None, reason=\"Archiving tempchannel\"\n )\n currentDate = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n await chanObj.edit(name=f\"tc-{currentDate}\")\n self.logger.info(\n \"Channel #%s (%s) in %s (%s) was archived.\",\n chanObj.name,\n chanObj.id,\n guild.name,\n guild.id,\n )\n elif chanObj and not guildData[KEY_ARCHIVE]:\n await chanObj.delete()\n\n self.logger.info(\n \"Channel #%s (%s) in %s (%s) was deleted.\",\n chanObj.name,\n chanObj.id,\n guild.name,\n guild.id,\n )\n except Exception: # pylint: disable=broad-except\n self.logger.error(\n \"Something went terribly wrong for server %s (%s)!\",\n guild.name,\n guild.id,\n exc_info=True,\n )",
"def at_channel_create(self):\n pass",
"def test_create(self):\n name = 'test'\n\n call_command('waffle_switch', name, 'on', create=True)\n switch = Switch.objects.get(name=name, active=True)\n switch.delete()\n\n call_command('waffle_switch', name, 'off', create=True)\n Switch.objects.get(name=name, active=False)",
"async def todoset_channel(self, ctx, channel: discord.Channel = None):\n if channel is None:\n channel = ctx.message.channel\n self.settings[\"task_channel_id\"] = channel.id\n dataIO.save_json(JSON, self.settings)\n message = await self.bot.say(\"Channel set\")\n await asyncio.sleep(5)\n await self.bot.delete_message(ctx.message)\n await self.bot.delete_message(message)",
"def set_active_channel(self, chan):\n raise NotImplementedError",
"def at_channel_create(self):\r\n pass",
"async def disable(self, ctx):\r\n\r\n # Remove the channel from memory\r\n Database.Cogs[self.name][ctx.guild.id][\"settings\"][\"highlight_channel\"] = None\r\n\r\n # Write the settings to the database\r\n Database.writeSettings(self, ctx.guild.id)\r\n\r\n await ctx.message.add_reaction(Dictionary.check_box)",
"async def on_channel_delete(self, channel):",
"def toggle(self):",
"async def delete_channel(self, msg, *, name:str): #Allow Admin/Mod or Creator of that channel delete it\n name = name.replace(\" \",\"-\").lower()\n mod_bool= False\n Roles= self.redis.hgetall(\"{}:Channel:Config\".format(msg.message.server.id))\n Roles= \"{},{}\".format(Roles[\"Admin_Roles\"],Roles[\"Roles\"])\n if name in self.Temp_Chan[msg.message.server.id]:\n for role in msg.message.author.roles:\n print(role)\n if role.name in Roles:\n mod_bool = True\n break\n if msg.message.author.id == self.Temp_Chan[msg.message.server.id][name][\"Creator\"] or mod_bool is True:\n await self.bot.delete_channel(self.Temp_Chan[msg.message.server.id][name][\"Name\"])\n await self.bot.say_edit(\"{} is now delete.\".format(name.replace(\"-\",\" \")))\n else:\n await self.bot.say_edit(\"You do not have right to delete this!\\nYou need to be either creator of {} or mod\".format(name))\n else:\n await self.bot.say_edit(\"{} does not exist! Please double check spelling\".format(name))",
"def set_off(self,channel):\n\t\t\n\t\tif channel not in [0,1,2,3,4]: return\n\t\tresponse = self.send_command( 'OFF %d\\r' % channel )",
"def test__Channel__iter_delete__2():\n guild_id = 202211090022\n channel_id = 202211090023\n \n guild = Guild.precreate(guild_id)\n \n channel = Channel.precreate(channel_id, channel_type = ChannelType.guild_category, guild_id = guild_id)\n guild.channels[channel_id] = channel\n \n channels = {*channel._iter_delete(None)}\n \n vampytest.assert_eq(channels, {channel})\n vampytest.assert_not_in(channel_id, guild.channels)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the duration of the temp channel. Max 100 hours.
|
async def tempChannelsDuration(self, ctx: Context, hours: int, minutes: int):
if (hours >= 100) or (hours < 0):
await ctx.send(
":negative_squared_cross_mark: TempChannel - Duration: "
"Please enter valid hours!"
)
return
if (minutes >= 60) or (minutes < 0):
await ctx.send(
":negative_squared_cross_mark: TempChannel - Duration: "
"Please enter valid minutes!"
)
return
if (hours >= 99) and (minutes >= 60):
await ctx.send(
":negative_squared_cross_mark: TempChannel - Duration: "
"Please enter a valid duration!"
)
return
guildConfig = self.config.guild(ctx.guild)
await guildConfig.get_attr(KEY_DURATION_HOURS).set(hours)
await guildConfig.get_attr(KEY_DURATION_MINS).set(minutes)
self.logger.info(
"%s (%s) set the duration to %s hours, %s minutes on %s (%s)",
ctx.author.name,
ctx.author.id,
hours,
minutes,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Duration: Duration set to "
"**{0} hours, {1} minutes**.".format(hours, minutes)
)
|
[
"def set_duration(self, hours, minutes, seconds):\n self.duration = (hours, minutes, seconds)",
"def set_duration(self, duration):\n pass",
"def duration(self, duration):\n self._duration = duration",
"async def set_frequency_duration_minutes(self, duration: int):\n self._frequency.duration_minutes = duration",
"def reset_duration(self):\n self.remaining_duration = self.duration",
"def duration_minutes(self, duration: int) -> None:\n self._duration = duration",
"def TT_set_timeout(self,timeval):\n tcount = timeval*DET_REF_CLK#Calculate time out counter value\n self.TT_CONFIG.write(ch1_data,int(tcount))#Write the new counter value to the time tagger",
"def set_desired_duration(self, desired_duration: float):\n self.desired_duration = desired_duration",
"def set_duration(self, duration):\n self.__test_result[TestResult.__DURATION] = round(duration * 1000)",
"def reset_duration(self):\n self.__duration = 0",
"def set_timeout(self, timeout):\r\n self.timeout = float(timeout)/1000.",
"def output_duration_units(self, val):\n self.__outputDurationUnits = val",
"def set_stats_timeout(self, timeout='10s'):\n self.stats['timeout'] = timeout",
"def _set_duration(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"duration\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"duration must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"duration\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__duration = t\n if hasattr(self, '_set'):\n self._set()",
"async def async_set_doorbell_chime_duration(self, chime_duration):\n await self.upv_object.set_doorbell_chime_duration(\n self._device_id, chime_duration\n )",
"def _set_timeout(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=RestrictedClassType(\n base_type=int, restriction_dict={\"range\": [\"0..65535\"]}, int_size=16\n ),\n is_leaf=True,\n yang_name=\"timeout\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"http://openconfig.net/yang/system\",\n defining_module=\"openconfig-system\",\n yang_type=\"uint16\",\n is_config=False,\n )\n except (TypeError, ValueError):\n raise ValueError(\n {\n \"error-string\": \"\"\"timeout must be of a type compatible with uint16\"\"\",\n \"defined-type\": \"uint16\",\n \"generated-type\": \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"timeout\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint16', is_config=False)\"\"\",\n }\n )\n\n self.__timeout = t\n if hasattr(self, \"_set\"):\n self._set()",
"def set_playbacktime():\n set_hours = input('Enter hours: ')\n set_minutes = input('Enter minutes: ')\n\n set_hours = ((set_hours * 60) * 60) * 1000\n set_minutes = (set_minutes * 60) * 1000\n\n # Sets the time in milliseconds\n player.set_time(set_hours + set_minutes)",
"def set_delta(self, delta_temp = 0, delta_hum = 0):\n self.delta_temp = delta_temp\n self.delta_hum = delta_hum",
"def setSleepLength(self, wait):\n\n self.wait = wait - self.PERIOD"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
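The duration command above only validates and stores the hour and minute values; the background loop shown among the negatives (checkChannels) combines them into an absolute stop timestamp. A minimal sketch of that arithmetic, reusing the bounds the command enforces, is below; the helper name is illustrative and not part of the cog.

import time

def compute_stop_time(hours: int, minutes: int) -> float:
    # Same bounds the command enforces: 0-99 hours, 0-59 minutes.
    if not 0 <= hours < 100:
        raise ValueError("hours must be between 0 and 99")
    if not 0 <= minutes < 60:
        raise ValueError("minutes must be between 0 and 59")
    duration_seconds = hours * 60 * 60 + minutes * 60
    return time.time() + duration_seconds

# A 1 hour 30 minute lifetime expires 5400 seconds from now.
print(compute_stop_time(1, 30) - time.time())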
Set the parent category of the text channel.
|
async def tempChannelsCategory(
self, ctx: Context, *, category: discord.CategoryChannel = None
):
    await self.config.guild(ctx.guild).get_attr(KEY_CH_CATEGORY).set(
        category.id if category else None
    )
if not category:
self.logger.info(
"%s (%s) disabled category nesting on %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Category: Parent " "category disabled."
)
else:
self.logger.info(
"%s (%s) set the parent category ID to %s on %s (%s)",
ctx.author.name,
ctx.author.id,
category.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Category: Parent "
"category set to **{}**.".format(category.name)
)
|
[
"def set_parent(self, parent):\n self._parent = parent",
"def set_parent(self, parent_node):\n self.parent = parent_node",
"def assignParentControl(self):\r\n super(GUI, self).assignParentControl()\r\n selected = pm.selected()\r\n\r\n if not selected:\r\n self.parent_control.setText('')\r\n return\r\n\r\n parent_control = selected[-1].nodeName()\r\n if len(selected) > 1:\r\n pm.warning('More than one object selected, assigning ' + parent_control + ' as parent control')\r\n\r\n self.parent_control.setText(parent_control)",
"def set_parent(self, parent):\n self.parent = parent\n self.level = parent.level + 1 if parent else 0",
"def _structure_parent_category(_parent_id, _payload):\n if _parent_id:\n _payload['data']['parent_category'] = {\n 'id': _parent_id\n }\n return _payload",
"def set_parent(self, parent):\n # If the attribute already has a parent (we are moving the attribute) then fail with a runtime exception.\n if self._parent:\n raise CloudioModificationException('The parent of an Attribute can never be changed ' +\n '(Attributes can not be moved)!')\n # assert isinstance(parent, CloudioAttributeContainer), 'Wrong type for parent attribute!'\n self._parent = parent",
"def set_parent_id(self, parent_id):\n pass",
"def set_parent ( self, parent ):\n self.parent_ref = get_object_ref ( parent )",
"def set_parent(self, value):\n #Ensure that this sprite is hidden\n if self.visible:\n self.show(False)\n\n #Change the parent\n self._parent = value",
"def set_current_category(self, category=None):\n if category is None and self.current_category is None:\n return\n if category is not None and self.current_category is not None \\\n and category == self.current_category:\n # close the current category\n self.current_category = None\n else:\n self.current_category = category\n if settings.get('feeds', 'default') == 'labels':\n self.update_feed_list()\n # display maximum of content for the category\n if self.current_category:\n feed_unread_only = self.unread_only and not isinstance(self.current_category, SpecialCategory)\n feeds = self.current_category.get_feeds(unread_only=feed_unread_only)\n try:\n # start with last\n max_index = self.ui.listFeedList.model().index_of(feeds[-1])\n self.ui.listFeedList.scrollTo(max_index)\n # then scroll again to category\n min_index = self.ui.listFeedList.model().index_of(self.current_category)\n self.ui.listFeedList.scrollTo(min_index)\n except:\n pass",
"def setParent(self, parent):\n assert isinstance(parent, RedBlackTree) or parent == None\n self.parentTree = parent",
"def set_parent(self, node_id: int):\r\n self.parent = node_id",
"def parent(self, parent):\n warnings.warn(\n \"Setting a parent is potentially dangerous. Consider using \"\n \"Topology.add_subtopology instead\"\n )\n if parent is None:\n raise NotImplementedError(\n \"Setting parents to None is not yet supported\"\n )\n self._parent = _validate_parent(parent)",
"def setParent(self, parent):\n \n # Don't allow a node to set its parent as one of its children!\n if (parent in self.unorderedChildren):\n logging.error(\"Node.setParent: cannot set a node's child to be its own parent! node = {}; parent = {}\"\n .format(self.name, parent.name))\n return\n \n # 1st, remove this child from its current parent\n if (self.parent is not None):\n self.parent.__removeChild(self)\n \n # 2nd, set the new parent (setting to None is OK)\n self.parent = parent\n if (self.parent is not None):\n self.parent.__addChild(self)",
"def parent_category(self, record):\n parent_id = None\n category = record.openerp_id\n backend = record.backend_id\n\n # Does the parent category is the category mapped to Vocabulary?\n is_main = category.parent_id.id == backend.main_product_category_id.id\n\n if category.parent_id and not is_main:\n parent_id = category.parent_id.drupal_bind_ids[0].drupal_id\n\n return {'parent': parent_id}",
"def set_parent(self, parent: 'Node'):\n if parent == self.parent:\n return\n self.parent = parent\n if parent is not None:\n self.parent.add_child(self)",
"def category(self, category: ConfigNodePropertyString):\n\n self._category = category",
"def setParent(self,p,uparent=None,eparent=None):\n if self.parent != None:\n self.parent.children.remove(self)\n self.parent = p\n self.uparent = uparent\n self.eparent = eparent\n p.children.append(self)",
"def add_parent(self, p):\n assert isinstance(p, TermClass)\n rep = self.get_representative()\n rep.parents.add(p)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a role to allow access to the channel.
|
async def tempChannelsAllowAdd(self, ctx: Context, *, role: discord.Role):
async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:
if role.id not in roleAllow:
roleAllow.append(role.id)
self.logger.info(
"%s (%s) added role %s to the allow list on %s (%s)",
ctx.author.name,
ctx.author.id,
role.name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Role Allow: **`{0}`"
"** will be allowed access.".format(role.name)
)
else:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Role Allow: "
"**`{0}`** is already allowed.".format(role.name)
)
|
[
"def add_role(self, role):\n print(f'Parsing permissions for the role - {role.name}')\n\n self.permissions = role.permissions\n self.can_kick = role.permissions.kick_members\n self.can_ban = role.permissions.ban_members\n self.can_move = role.permissions.move_members\n self.can_manage_roles = role.permissions.manage_roles\n\n print('Permissions locked')\n print(f' Can kick - {self.can_kick}')\n print(f' Can ban - {self.can_ban}')\n print(f' Can move - {self.can_move}')\n print(f' Can manage roles - {self.can_manage_roles}')",
"async def assign_role(self, ctx, * , role: CustomRoleConverter):\n settable_role = find(lambda r: r.id in self.settable_roles, ctx.guild.roles)\n if role == settable_role and self.lockdown:\n await ctx.send(\"Server on lockdown due to high amount of people joining try again in a day or two\")\n return\n if role.position > settable_role.position:\n if ctx.channel.name != \"have-you-read-the-rules\":\n await ctx.send(\"can't give you that role\")\n return\n try:\n admin_cog = self.bot.get_cog(\"Admin\")\n if admin_cog:\n if admin_cog.mute_role == role:\n return\n member = ctx.message.author\n await member.add_roles(role)\n await ctx.send(f\"Assigned you the following role: {role.name}\")\n except discord.Forbidden as fb:\n await ctx.send(\"Sorry I don't have the permission to give you that role\")",
"async def addRole(self, ctx, role: discord.Role):\n guild = self.bot.cache.get_setting(ctx.guild.id)\n allowed_roles = guild.allowed_roles\n if not allowed_roles:\n roles = []\n roles.append(role.id)\n await self.bot.pool.execute(\n \"UPDATE settings SET allowed_roles = $1 WHERE guild_id = $2\",\n roles,\n ctx.guild.id,\n )\n # updating the cache\n self.bot.cache.settings[ctx.guild.id] = {\n \"prefix\": guild[\"prefix\"],\n \"allowed_roles\": roles,\n }\n embed = generate_embed(\n f\":thumbsup: | Successfully added `{role.name}` to allowed roles list, now any person with `{role.name}` can make announcements!\"\n )\n embed.set_footer(\n text=f\"Tip: To remove a role from making announcements, use: `{ctx.prefix}config remRole <role>`\",\n icon_url=ctx.guild.icon_url,\n )\n await ctx.reply(embed=embed)\n return\n if role.id in allowed_roles:\n return await ctx.reply(\n f\":negative_squared_cross_mark: | `{role.name}` role already has permissions to make announcements!\"\n )\n allowed_roles.append(role.id)\n await self.bot.pool.execute(\n \"UPDATE settings SET allowed_roles = $1 WHERE guild_id = $2\",\n allowed_roles,\n ctx.guild.id,\n )\n # updating the cache\n self.bot.cache.settings[ctx.guild.id] = {\n \"prefix\": guild[\"prefix\"],\n \"allowed_roles\": allowed_roles,\n }\n embed = generate_embed(\n f\":thumbsup: | Successfully added `{role.name}` to allowed roles list, now any person with `{role.name}` role can make announcements!\"\n )\n embed.set_footer(\n text=f\"Tip: To remove a role from making announcements, use: `{ctx.prefix}config remRole <role>`\",\n icon_url=ctx.guild.icon_url,\n )\n await ctx.reply(embed=embed)",
"async def tempChannelsDenyAdd(self, ctx: Context, *, role: discord.Role):\n async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_DENY)() as roleDeny:\n if role.id not in roleDeny:\n roleDeny.append(role.id)\n self.logger.info(\n \"%s (%s) added role %s to the deny list on %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n role.name,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":white_check_mark: TempChannel - Role: **`{0}`** will \"\n \"be denied sending, provided this role is higher \"\n \"than any of the ones in the allowed list.\".format(role.name)\n )\n else:\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel - Role Deny: \"\n \"**`{0}`** is already denied.\".format(role)\n )",
"def add_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.add(user)",
"async def add_sub_role(self, ctx, *, role:discord.Role):\n roles = self.egl_db.get('sub_roles', [])\n if role.name in roles:\n await self.bot.say('This role is already subscribable')\n return\n\n if role.permissions.value > 0:\n await self.bot.say('You cannot add a role with any permissions for server security reasons.')\n return\n\n await self.bot.say('You can add this role to the list of subscribable roles.')",
"def add_gp_role(request, role, group, domain=None, project=None):\n ksclient = get_admin_ksclient()\n return ksclient.roles.grant(role=role, group=group, domain=domain,\n project=project)",
"async def addRole(guild, join_message, role: str) -> str:\n if role in getRoles(guild.id):\n return f'{role} already exists!'\n\n # Otherwise, create the role\n else:\n try:\n await guild.create_role(name=role)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(task=\"create_role\", detail=role)\n\n getRoles(guild.id)[role] = join_message\n dumpConfigRoles(guild.id)\n return ''",
"async def setbanned_addrole(self, ctx, *, role):\n server = ctx.message.server\n self.check_server_settings(server)\n server_role = discord.utils.get(server.roles, name=role)\n if server_role is None:\n await self.bot.say(\n '{} is not a valid role on this server.'.format(role))\n return\n self.check_server_settings(server)\n if server_role.id in self.settings[server.id][\"ROLES\"]:\n await self.bot.say(\n '{} is already in the list.'.format(role))\n return\n self.settings[server.id][\"ROLES\"].append(server_role.id)\n role_ids = self.settings[server.id][\"ROLES\"]\n roles = [discord.utils.get(server.roles, id=id) for id in role_ids]\n role_names = [r.name for r in roles]\n await self.bot.say(\n 'List of roles updated: {}.'.format(\n ', '.join(role_names)))\n dataIO.save_json(JSON, self.settings)",
"async def _setfullaccessrole(self, ctx: commands.Context, role: discord.Role):\n await self.config.guild(ctx.guild).fullaccessrole.set(role.id)\n await ctx.send(f\"Full rcon access role has been set to {role}\")",
"def add_permission_to_role(self, role, permission) -> None:\n raise NotImplementedError",
"def add_role_permission(self, user, role, key, path=None):\n if self.can_modify_roles(user) and role in self[\"roles\"]:\n if path is None:\n self[\"general\"][key].append(role)\n else:\n if path not in self[\"files\"]:\n self._generate_file_entry(path)\n self[\"files\"][path][key].append(role)\n return True\n return False",
"async def setlinkrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredLinkRole\", \"\")\r\n\t\t\tmsg = 'Add/remove links now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredLinkRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove links set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)",
"def create_role(self, user, role):\n if self.can_modify_roles(user) and role not in self[\"roles\"] and role != \"default\":\n self[\"roles\"][role] = []\n return True\n return False",
"async def addrole(self, ctx, role:discord.Role, *users:discord.User):\n if ctx.message.server.me.permissions_in(ctx.message.channel).manage_roles == False:\n await self.bot.say(\"Sorry, I do not have the manage_roles permission\\n**Aborting**\")\n return\n if len(users) == 0:\n await self.bot.say(\":no_entry: You need to specify a user to give the role too.\")\n idk = []\n for user in users:\n await self.bot.add_roles(user, role)\n idk.append(user.name)\n await self.bot.say(\"ok, gave user(s) `\" + \", \".join(idk) + \"` the role {0}\".format(role.name))",
"async def subscribe(self, ctx, *, role:discord.Role):\n await self.do_subscription(ctx, role, 'add_roles')",
"def set_role(self, role):\n self.role.set(role)",
"async def addrole(ctx, role : discord.Role = None, xp : int = None):\r\n\tisAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\r\n\t# Only allow admins to change server stats\r\n\tif not isAdmin:\r\n\t\tawait bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')\r\n\t\treturn\r\n\r\n\tif role == None or xp == None:\r\n\t\tmsg = 'Usage: `$addrole [role] [required xp]`'\r\n\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\treturn\r\n\r\n\tif not type(xp) is int:\r\n\t\tmsg = 'Usage: `$addrole [role] [required xp]`'\r\n\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\treturn\r\n\r\n\tif type(role) is str:\r\n\t\ttry:\r\n\t\t\trole = discord.utils.get(message.server.roles, name=role)\r\n\t\texcept:\r\n\t\t\tprint(\"That role does not exist\")\r\n\t\t\treturn\r\n\r\n\t# Now we see if we already have that role in our list\r\n\tpromoArray = getServerStat(ctx.message.server, globals.serverList, \"PromotionArray\")\r\n\r\n\tfor aRole in promoArray:\r\n\t\t# Get the role that corresponds to the id\r\n\t\tif aRole['ID'] == role.id:\r\n\t\t\t# We found it - throw an error message and return\r\n\t\t\tmsg = '{} is already in the list. Required xp: {}'.format(role.name, aRole['XP'])\r\n\t\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\t\treturn\r\n\r\n\t# If we made it this far - then we can add it\r\n\tpromoArray.append({ 'ID' : role.id, 'Name' : role.name, 'XP' : xp })\r\n\tsetServerStat(ctx.message.server, globals.serverList, \"PromotionArray\", promoArray)\r\n\r\n\tmsg = '{} added to list. Required xp: {}'.format(role.name, xp)\r\n\tawait bot.send_message(ctx.message.channel, msg)\r\n\treturn",
"def grant_role(self, user, targetUser, role):\n if self.can_grant_permissions(user):\n if role in self[\"roles\"]:\n self[\"roles\"][role].append(targetUser)\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
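The allow list maintained by the command above is only consumed when the channel is created: in the checkChannels negative shown earlier, @everyone is denied reading whenever an allow list exists, each allowed role is granted it, and denied roles lose send permissions. A small sketch of that mapping, assuming discord.py's PermissionOverwrite API; the helper name is mine and not part of the cog.

import discord

def build_overwrites(guild: discord.Guild, allow_ids, deny_ids):
    # The bot always keeps read access to its own temp channel.
    overwrites = {guild.me: discord.PermissionOverwrite(read_messages=True)}
    if allow_ids:
        # An allow list hides the channel from @everyone by default.
        overwrites[guild.default_role] = discord.PermissionOverwrite(read_messages=False)
        for role_id in allow_ids:
            role = guild.get_role(role_id)
            if role:
                overwrites[role] = discord.PermissionOverwrite(read_messages=True)
    for role_id in deny_ids or []:
        role = guild.get_role(role_id)
        if role:
            overwrite = overwrites.get(role, discord.PermissionOverwrite())
            overwrite.update(send_messages=False)
            overwrites[role] = overwrite
    return overwrites

The resulting mapping can then be passed as the overwrites argument to guild.create_text_channel, which is what the background loop does.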
Remove a role from being able to access the temporary channel.
|
async def tempChannelsAllowRemove(self, ctx: Context, *, role: discord.Role):
async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:
if not roleAllow or role.id not in roleAllow:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Role Allow: "
"**`{0}`** wasn't on the list.".format(role.name)
)
else:
roleAllow.remove(role.id)
self.logger.info(
"%s (%s) removed role %s from the allow list on %s (%s)",
ctx.author.name,
ctx.author.id,
role.name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Role Allow: **`{0}`** "
"removed from the list.".format(role.name)
)
|
[
"async def tempChannelsDenyRemove(self, ctx: Context, *, role: discord.Role):\n async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_DENY)() as roleDeny:\n if not roleDeny or role.id not in roleDeny:\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel - Role Deny: \"\n \"**`{0}`** wasn't on the list.\".format(role.name)\n )\n else:\n roleDeny.remove(role.id)\n self.logger.info(\n \"%s (%s) removed role %s from the deny list on %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n role.name,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":white_check_mark: TempChannel - Role Deny: **`{0}`** \"\n \"removed from the list.\".format(role.name)\n )",
"async def _remove(self, ctx: commands.Context, user: discord.Member, role: discord.Role):\n async with self.config.member(user).temp_roles() as user_tr:\n if not (user_tr.get(str(role.id))):\n return await ctx.send(\n f\"That is not an active TempRole for {user.mention}.\",\n allowed_mentions=discord.AllowedMentions.none()\n )\n del user_tr[str(role.id)]\n message = f\"TempRole {role.mention} for {user.mention} has been removed.\"\n await ctx.send(\n message,\n allowed_mentions=discord.AllowedMentions.none()\n )\n await self._maybe_send_log(ctx.guild, message)\n await self._tr_end(user, role, admin=ctx.author)",
"def removeRole(self, role):\n pass",
"async def _delmodrole(self, ctx: commands.Context, role: discord.Role):\n async with self.config.guild(ctx.guild).modroles() as modroles:\n if role.id in modroles:\n modroles.remove(role.id)\n await ctx.send(f\"{role} role has been removed.\")\n else:\n await ctx.send(\"That role isn't in the list.\")",
"async def remove(self, ctx, *, role_name):\n found_role = None\n for role in ctx.guild.roles:\n if role.name.lower() == role_name.lower():\n found_role = role\n if found_role:\n try:\n success = await \\\n self.bot.pg_utils.remove_autoassign_role(\n ctx.guild.id, found_role.id, self.bot.logger)\n except ValueError:\n local_embed = discord.Embed(\n title=f'{found_role.name} is already'\n ' not on the auto-assignable list',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n return\n if success:\n local_embed = discord.Embed(\n title=f'Removed {found_role.name} '\n 'from auto-assignable roles',\n description=' ',\n color=0x419400\n )\n else:\n local_embed = discord.Embed(\n title=f'Internal error occured,'\n ' please contact @dashwav#7785',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n else:\n local_embed = discord.Embed(\n title=f'Couldn\\'t find role {role_name}',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)",
"async def _unrequest(self, ctx, role : str):\n\n # attempt to find role that user specied for removal\n auth = ctx.message.author\n serv = ctx.message.guild\n role = dh.get_role(serv, role)\n guild_id = str(serv.id)\n role_id = str(role.id)\n\n # if user failed to find specify role, complain\n if not role:\n await ctx.send('Please specify a valid role')\n return\n\n # get a list of roles that are listed as public and the user roles\n available_roles = self.conf.get(guild_id, {}).get('pub_roles', [])\n user_roles = discord.utils.find(lambda r: str(r.id) == role_id, auth.roles)\n\n # ONLY remove roles if they are in the public roles list\n # Unless there is no list,\n # in which case any of the user's roles can be removed\n if role_id in available_roles or user_roles:\n await auth.remove_roles(role)\n await ctx.send(ok('you no longer have that role'))\n else:\n await ctx.send(error('I\\'m afraid that I can\\'t remove that role'))",
"async def _remove(self, ctx: commands.Context, role: discord.Role, *channels: typing.Union[discord.VoiceChannel, discord.CategoryChannel]):\n\n async with self.config.guild(ctx.guild).stream_roles() as settings:\n if existing := settings.get(str(role.id)):\n for c in channels:\n if c.id in existing:\n settings[str(role.id)].remove(c.id)\n if not settings[str(role.id)]:\n del settings[str(role.id)]\n else:\n return await ctx.send(\"There are no VCs or Categories with that StreamRole!\")\n\n return await ctx.tick()",
"def test_remove_role_from_user(self):\n pass",
"def delete_role(self, user, role):\n if self.can_modify_roles(user) and role in self[\"roles\"] and role != \"default\":\n for permission in Permissions.ROLE_PERMISSIONS:\n if role in self[\"general\"][permission]:\n self[\"general\"][permission].remove(role)\n for path in self[\"files\"]:\n if role in self[\"files\"][path][\"roles_write\"]:\n self[\"files\"][path][\"roles_write\"].remove(role)\n del self[\"roles\"][role]\n return True\n return False",
"async def setbanned_removerole(self, ctx, *, role):\n server = ctx.message.server\n self.check_server_settings(server)\n server_role = discord.utils.get(server.roles, name=role)\n if server_role is None:\n await self.bot.say(\n '{} is not a valid role on this server.'.format(role))\n return\n self.check_server_settings(server)\n if server_role.id not in self.settings[server.id][\"ROLES\"]:\n await self.bot.say(\n '{} is not on in the list.'.format(role))\n return\n self.settings[server.id][\"ROLES\"].remove(server_role.id)\n await self.bot.say(\n 'Removed {} from list of roles.'.format(role))\n dataIO.save_json(JSON, self.settings)",
"async def remove(self, ctx, *, role: discord.Role):\r\n\t\tdata = json_mngr().read('./data/settings.json')\r\n\t\tif str(ctx.guild.id) not in data.keys():\r\n\t\t\tdata[str(ctx.guild.id)] = {\r\n\t\t\t\t\"edit_roles\": [],\r\n\t\t\t\t\"view_roles\": [],\r\n\t\t\t\t\"log_channel\": None\r\n\t\t\t}\r\n\t\tif role:\r\n\t\t\tif role.id in data[str(ctx.guild.id)]['edit_roles']:\r\n\t\t\t\tdata[str(ctx.guild.id)]['edit_roles'].remove(role.id)\r\n\t\t\tjson_mngr().handle_modify('./data/settings.json', newdata=data, indent=2, backup=True)\r\n\t\t\tawait ctx.send(f\"removed {role.id} as an editing role.\")",
"def remove_role(user, role):\n return _assign_or_remove_role(user, role, \"remove_role_from_user\")",
"def test_remove_user_role(self):\n pass",
"async def removeadmin(ctx, role : discord.Role = None):\r\n\tisAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\r\n\t# Only allow admins to change server stats\r\n\tif not isAdmin:\r\n\t\tawait bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')\r\n\t\treturn\r\n\r\n\tif role == None:\r\n\t\tmsg = 'Usage: `$removeadmin [role]`'\r\n\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\treturn\r\n\r\n\tif type(role) is str:\r\n\t\ttry:\r\n\t\t\trole = discord.utils.get(message.server.roles, name=role)\r\n\t\texcept:\r\n\t\t\tprint(\"That role does not exist\")\r\n\t\t\treturn\r\n\r\n\t# If we're here - then the role is a real one\r\n\tpromoArray = getServerStat(ctx.message.server, globals.serverList, \"AdminArray\")\r\n\r\n\tfor aRole in promoArray:\r\n\t\t# Get the role that corresponds to the id\r\n\t\tif aRole['ID'] == role.id:\r\n\t\t\t# We found it - let's remove it\r\n\t\t\tpromoArray.remove(aRole)\r\n\t\t\tsetServerStat(ctx.message.server, globals.serverList, \"AdminArray\", promoArray)\r\n\t\t\tmsg = '{} removed successfully.'.format(aRole['Name'])\r\n\t\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\t\treturn\r\n\r\n\t# If we made it this far - then we didn't find it\r\n\tmsg = '{} not found in list.'.format(aRole['Name'])\r\n\tawait bot.send_message(ctx.message.channel, msg)",
"async def remove_course_role(self, role_id):\n role = self.bot.get_guild(self.guild_id).get_role(role_id)\n if role is None:\n return logger.error(\"role is empty.\")\n\n await role.delete()",
"async def remove(self, ctx, *, user_name: str):\n\n async with self.bot.db.execute(\"SELECT role_id FROM mapset_channels WHERE user_id = ? AND channel_id = ?\",\n [int(ctx.author.id), int(ctx.channel.id)]) as cursor:\n role_id_list = await cursor.fetchone()\n if not role_id_list:\n await ctx.send(\"not your mapset channel\")\n return\n\n member = get_member_helpers.get_member_guaranteed(ctx, user_name)\n if not member:\n await ctx.send(\"No member found with what you specified. Try using a Discord account ID.\")\n return\n\n role = ctx.guild.get_role(int(role_id_list[0]))\n if not role:\n await ctx.reply(\"Looks like the role for this mapset channel no longer exists.\")\n return\n\n try:\n await member.remove_roles(role, reason=\"removed from mapset\")\n await ctx.send(f\"removed {member.mention} from this channel\")\n except discord.Forbidden:\n await ctx.reply(\"I do not have permissions to remove roles.\")",
"async def removeadmin(self, ctx, *, role : str = None):\r\n\r\n\t\tusage = 'Usage: `{}removeadmin [role]`'.format(ctx.prefix)\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\treturn await ctx.send(usage)\r\n\r\n\t\t# Name placeholder\r\n\t\troleName = role\r\n\t\tif type(role) is str:\r\n\t\t\tif role.lower() == \"everyone\" or role.lower() == \"@everyone\":\r\n\t\t\t\trole = ctx.guild.default_role\r\n\t\t\telse:\r\n\t\t\t\trole = DisplayName.roleForName(role, ctx.guild)\r\n\r\n\t\t# If we're here - then the role is a real one\r\n\t\tpromoArray = self.settings.getServerStat(ctx.message.guild, \"AdminArray\")\r\n\r\n\t\tfor aRole in promoArray:\r\n\t\t\t# Check for Name\r\n\t\t\tif aRole['Name'].lower() == roleName.lower():\r\n\t\t\t\t# We found it - let's remove it\r\n\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\tself.settings.setServerStat(ctx.message.guild, \"AdminArray\", promoArray)\r\n\t\t\t\tmsg = '**{}** removed successfully.'.format(aRole['Name'])\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t\t# Get the role that corresponds to the id\r\n\t\t\tif role and (str(aRole['ID']) == str(role.id)):\r\n\t\t\t\t# We found it - let's remove it\r\n\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\tself.settings.setServerStat(ctx.message.guild, \"AdminArray\", promoArray)\r\n\t\t\t\tmsg = '**{}** removed successfully.'.format(role.name)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we didn't find it\r\n\t\tmsg = '**{}** not found in list.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)",
"def remove_permission_from_role(self, role, permission) -> None:\n raise NotImplementedError",
"async def voice_unlink(\n self, ctx: Context, role: Role, voice_channel: VoiceChannel\n ):\n amount = await RoleForVCRepository().destroy_by(\n {\"role_id\": role.id, \"voice_channel_id\": voice_channel.id}\n )\n\n if amount > 0:\n await ctx.send(\n f\"Role <@&{role.id}> and voice channel \"\n f\"**<#{voice_channel.id}>** unlinked successfully!\"\n )\n elif amount == 0:\n await ctx.send(\n f\"Role <@&{role.id}> and voice channel \"\n f\"**<#{voice_channel.id}>** are not linked.\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a role to be blocked from sending messages to the channel. This role should be HIGHER in the role hierarchy than the roles in the allowed list! The bot will not check for this.
|
async def tempChannelsDenyAdd(self, ctx: Context, *, role: discord.Role):
async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_DENY)() as roleDeny:
if role.id not in roleDeny:
roleDeny.append(role.id)
self.logger.info(
"%s (%s) added role %s to the deny list on %s (%s)",
ctx.author.name,
ctx.author.id,
role.name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Role: **`{0}`** will "
"be denied sending, provided this role is higher "
"than any of the ones in the allowed list.".format(role.name)
)
else:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Role Deny: "
"**`{0}`** is already denied.".format(role)
)
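
The deny/allow lists above are stored per guild. Below is a minimal sketch of how these KEY_* settings might be registered with Red-DiscordBot's Config; the key names follow this section's code, while the storage strings, identifier, and default values are assumptions rather than the cog's actual ones.

from redbot.core import Config

# Assumed storage names for the keys referenced in this section.
KEY_ROLE_ALLOW = "roleAllow"
KEY_ROLE_DENY = "roleDeny"
KEY_CH_ID = "channelId"
KEY_CH_CREATED = "channelCreated"

DEFAULT_GUILD = {
    KEY_ROLE_ALLOW: [],      # role IDs allowed to see the temp channel
    KEY_ROLE_DENY: [],       # role IDs denied from sending in the temp channel
    KEY_CH_ID: None,         # ID of the currently created temp channel, if any
    KEY_CH_CREATED: False,   # whether the temp channel currently exists
}

def setup_config(cog) -> Config:
    # Hypothetical identifier; any unique integer works for Config.get_conf.
    config = Config.get_conf(cog, identifier=5247234556, force_registration=True)
    config.register_guild(**DEFAULT_GUILD)
    return config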
|
[
"async def tempChannelsAllowAdd(self, ctx: Context, *, role: discord.Role):\n async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:\n if role.id not in roleAllow:\n roleAllow.append(role.id)\n self.logger.info(\n \"%s (%s) added role %s to the allow list on %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n role.name,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":white_check_mark: TempChannel - Role Allow: **`{0}`\"\n \"** will be allowed access.\".format(role.name)\n )\n else:\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel - Role Allow: \"\n \"**`{0}`** is already allowed.\".format(role.name)\n )",
"async def setbanned_addrole(self, ctx, *, role):\n server = ctx.message.server\n self.check_server_settings(server)\n server_role = discord.utils.get(server.roles, name=role)\n if server_role is None:\n await self.bot.say(\n '{} is not a valid role on this server.'.format(role))\n return\n self.check_server_settings(server)\n if server_role.id in self.settings[server.id][\"ROLES\"]:\n await self.bot.say(\n '{} is already in the list.'.format(role))\n return\n self.settings[server.id][\"ROLES\"].append(server_role.id)\n role_ids = self.settings[server.id][\"ROLES\"]\n roles = [discord.utils.get(server.roles, id=id) for id in role_ids]\n role_names = [r.name for r in roles]\n await self.bot.say(\n 'List of roles updated: {}.'.format(\n ', '.join(role_names)))\n dataIO.save_json(JSON, self.settings)",
"async def assign_role(self, ctx, * , role: CustomRoleConverter):\n settable_role = find(lambda r: r.id in self.settable_roles, ctx.guild.roles)\n if role == settable_role and self.lockdown:\n await ctx.send(\"Server on lockdown due to high amount of people joining try again in a day or two\")\n return\n if role.position > settable_role.position:\n if ctx.channel.name != \"have-you-read-the-rules\":\n await ctx.send(\"can't give you that role\")\n return\n try:\n admin_cog = self.bot.get_cog(\"Admin\")\n if admin_cog:\n if admin_cog.mute_role == role:\n return\n member = ctx.message.author\n await member.add_roles(role)\n await ctx.send(f\"Assigned you the following role: {role.name}\")\n except discord.Forbidden as fb:\n await ctx.send(\"Sorry I don't have the permission to give you that role\")",
"def add_role(self, role):\n print(f'Parsing permissions for the role - {role.name}')\n\n self.permissions = role.permissions\n self.can_kick = role.permissions.kick_members\n self.can_ban = role.permissions.ban_members\n self.can_move = role.permissions.move_members\n self.can_manage_roles = role.permissions.manage_roles\n\n print('Permissions locked')\n print(f' Can kick - {self.can_kick}')\n print(f' Can ban - {self.can_ban}')\n print(f' Can move - {self.can_move}')\n print(f' Can manage roles - {self.can_manage_roles}')",
"async def addRole(guild, join_message, role: str) -> str:\n if role in getRoles(guild.id):\n return f'{role} already exists!'\n\n # Otherwise, create the role\n else:\n try:\n await guild.create_role(name=role)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(task=\"create_role\", detail=role)\n\n getRoles(guild.id)[role] = join_message\n dumpConfigRoles(guild.id)\n return ''",
"def add_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.add(user)",
"async def addRole(self, ctx, role: discord.Role):\n guild = self.bot.cache.get_setting(ctx.guild.id)\n allowed_roles = guild.allowed_roles\n if not allowed_roles:\n roles = []\n roles.append(role.id)\n await self.bot.pool.execute(\n \"UPDATE settings SET allowed_roles = $1 WHERE guild_id = $2\",\n roles,\n ctx.guild.id,\n )\n # updating the cache\n self.bot.cache.settings[ctx.guild.id] = {\n \"prefix\": guild[\"prefix\"],\n \"allowed_roles\": roles,\n }\n embed = generate_embed(\n f\":thumbsup: | Successfully added `{role.name}` to allowed roles list, now any person with `{role.name}` can make announcements!\"\n )\n embed.set_footer(\n text=f\"Tip: To remove a role from making announcements, use: `{ctx.prefix}config remRole <role>`\",\n icon_url=ctx.guild.icon_url,\n )\n await ctx.reply(embed=embed)\n return\n if role.id in allowed_roles:\n return await ctx.reply(\n f\":negative_squared_cross_mark: | `{role.name}` role already has permissions to make announcements!\"\n )\n allowed_roles.append(role.id)\n await self.bot.pool.execute(\n \"UPDATE settings SET allowed_roles = $1 WHERE guild_id = $2\",\n allowed_roles,\n ctx.guild.id,\n )\n # updating the cache\n self.bot.cache.settings[ctx.guild.id] = {\n \"prefix\": guild[\"prefix\"],\n \"allowed_roles\": allowed_roles,\n }\n embed = generate_embed(\n f\":thumbsup: | Successfully added `{role.name}` to allowed roles list, now any person with `{role.name}` role can make announcements!\"\n )\n embed.set_footer(\n text=f\"Tip: To remove a role from making announcements, use: `{ctx.prefix}config remRole <role>`\",\n icon_url=ctx.guild.icon_url,\n )\n await ctx.reply(embed=embed)",
"async def add_sub_role(self, ctx, *, role:discord.Role):\n roles = self.egl_db.get('sub_roles', [])\n if role.name in roles:\n await self.bot.say('This role is already subscribable')\n return\n\n if role.permissions.value > 0:\n await self.bot.say('You cannot add a role with any permissions for server security reasons.')\n return\n\n await self.bot.say('You can add this role to the list of subscribable roles.')",
"async def tempChannelsAllowRemove(self, ctx: Context, *, role: discord.Role):\n async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:\n if not roleAllow or role.id not in roleAllow:\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel - Role Allow: \"\n \"**`{0}`** wasn't on the list.\".format(role.name)\n )\n else:\n roleAllow.remove(role.id)\n self.logger.info(\n \"%s (%s) removed role %s from the allow list on %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n role.name,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":white_check_mark: TempChannel - Role Allow: **`{0}`** \"\n \"removed from the list.\".format(role.name)\n )",
"async def sethackrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredHackRole\", \"\")\r\n\t\t\tmsg = 'Add/remove hacks now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredHackRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove hacks set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)",
"async def _add(self, ctx: commands.Context, user: discord.Member, role: discord.Role, *, time: TimeConverter):\n if role in user.roles:\n return await ctx.send(f\"That user already has {role.mention}!\")\n\n if role >= ctx.guild.me.top_role or (role >= ctx.author.top_role and ctx.author != ctx.guild.owner):\n return await ctx.send(\"That role cannot be assigned due to the Discord role hierarchy!\")\n\n async with self.config.member(user).temp_roles() as user_tr:\n if user_tr.get(str(role.id)):\n return await ctx.send(\n f\"That is already an active TempRole for {user.mention}!\",\n allowed_mentions=discord.AllowedMentions.none()\n )\n end_time = datetime.now() + time\n user_tr[str(role.id)] = end_time.timestamp()\n\n if role < ctx.guild.me.top_role:\n if role not in user.roles:\n await user.add_roles(\n role,\n reason=f\"TempRole: added by {ctx.author}, expires in {time.days}d {time.seconds//3600}h\"\n )\n else:\n return await ctx.send(\"I cannot assign this role!\")\n\n message = f\"TempRole {role.mention} for {user.mention} has been added. Expires in {time.days} days {time.seconds//3600} hours.\"\n await ctx.send(\n message,\n allowed_mentions=discord.AllowedMentions.none()\n )\n\n await self._maybe_send_log(ctx.guild, message)\n await self._tr_timer(user, role, end_time.timestamp())",
"async def set_channel(self, ctx, role: discord.Role, channel: discord.TextChannel):\n cursor = self.bot.database.cursor()\n cursor.execute(\"SELECT member_ids FROM roles WHERE guild_id = ? AND role_id = ?\", (ctx.guild.id, role.id))\n self.bot.database.commit()\n row = cursor.fetchone()\n if row == None:\n return await ctx.send(\":no_entry: This role hasn't been added!\")\n cursor.execute(\"UPDATE roles SET channel_id = ? WHERE guild_id = ? AND role_id = ?\", (channel.id, ctx.guild.id, role.id))\n self.bot.database.commit()\n await ctx.send(\":white_check_mark: The channel has been changed!\")",
"async def add_member(self, ctx, role: discord.Role, member: discord.Member):\n cursor = self.bot.database.cursor()\n cursor.execute(\"SELECT member_ids, channel_id FROM roles WHERE guild_id = ? AND role_id = ?\", (ctx.guild.id, role.id))\n self.bot.database.commit()\n row = cursor.fetchone()\n if row == None:\n return await ctx.send(\":no_entry: This role hasn't been added!\")\n member_ids = json.loads(row[0])\n if member.id in member_ids:\n return await ctx.send(\":no_entry: This user has already been added to this role!\")\n member_ids.append(member.id)\n cursor.execute(\"UPDATE roles SET member_ids = ? WHERE guild_id = ? AND role_id = ?\", (json.dumps(member_ids), ctx.guild.id, role.id))\n self.bot.database.commit()\n await ctx.send(\":white_check_mark: The user has been added to this role!\")\n if not role in member.roles:\n try:\n await member.add_roles(role, reason=\"Automatic role assignment\")\n except discord.errors.Forbidden:\n self.bot.logger.error(\"Missing permissions for giving %s (ID: %s) the role %s (ID: %s)\" % (str(member), str(member.id), str(role), str(role_id)))\n else:\n channel = ctx.guild.get_channel(row[1])\n if channel == None:\n self.bot.logger.error(\"Channel not found: %s\" % str(row[1]))\n return\n await channel.send(self.bot.config[\"role_received_message\"].replace(\"$MENTION\", member.mention))",
"async def add_roles(self, ctx,\n message: typing.Union[discord.Message, str] = None, *, roles: converters.RoleConvertor):\n # Lookup by “{channel ID}-{message ID}” (retrieved by shift-clicking on “Copy ID”).\n # Lookup by message ID (the message must be in the context channel).\n # Lookup by message URL.\n # noinspection PyTypeChecker\n if len(roles) >= self.plugin.data.reactions.max_roles:\n return await ctx.send_line(f\"{ctx.emotes.web_emotion.xx} You can't include anymore roles.\")\n if len(ctx.guild_profile.reactions.roles) >= self.plugin.data.reactions.max_messages:\n return await ctx.send_line(f\"{ctx.emotes.web_emotion.xx} You cannot create anymore reaction roles.\")\n if not await ctx.confirm():\n return\n # noinspection PyTypeChecker\n roles_emotes = list(zip(roles, self.emotes))\n if not isinstance(message, discord.Message):\n message = message or \"Reaction Roles\"\n embed = ctx.embeds.primary()\n embed.set_author(name=message)\n embed.description = \"```css\\nReact to the emote corresponding to the role you wish to have.```\\n\"\n embed.description += \"\\n\".join([f\"{emote} {role.mention}\" for role, emote in roles_emotes]) + \"\\n\"\n embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon_url)\n message = await ctx.send(embed=embed)\n for _, emote in roles_emotes:\n await message.add_reaction(emote)\n await ctx.guild_profile.reactions.add_roles(message.id, roles)\n await ctx.send_line(f\"{ctx.emotes.web_emotion.galka} Provided roles has been set as reaction roles.\")",
"async def addrole(ctx, role : discord.Role = None, xp : int = None):\r\n\tisAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\r\n\t# Only allow admins to change server stats\r\n\tif not isAdmin:\r\n\t\tawait bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')\r\n\t\treturn\r\n\r\n\tif role == None or xp == None:\r\n\t\tmsg = 'Usage: `$addrole [role] [required xp]`'\r\n\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\treturn\r\n\r\n\tif not type(xp) is int:\r\n\t\tmsg = 'Usage: `$addrole [role] [required xp]`'\r\n\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\treturn\r\n\r\n\tif type(role) is str:\r\n\t\ttry:\r\n\t\t\trole = discord.utils.get(message.server.roles, name=role)\r\n\t\texcept:\r\n\t\t\tprint(\"That role does not exist\")\r\n\t\t\treturn\r\n\r\n\t# Now we see if we already have that role in our list\r\n\tpromoArray = getServerStat(ctx.message.server, globals.serverList, \"PromotionArray\")\r\n\r\n\tfor aRole in promoArray:\r\n\t\t# Get the role that corresponds to the id\r\n\t\tif aRole['ID'] == role.id:\r\n\t\t\t# We found it - throw an error message and return\r\n\t\t\tmsg = '{} is already in the list. Required xp: {}'.format(role.name, aRole['XP'])\r\n\t\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\t\treturn\r\n\r\n\t# If we made it this far - then we can add it\r\n\tpromoArray.append({ 'ID' : role.id, 'Name' : role.name, 'XP' : xp })\r\n\tsetServerStat(ctx.message.server, globals.serverList, \"PromotionArray\", promoArray)\r\n\r\n\tmsg = '{} added to list. Required xp: {}'.format(role.name, xp)\r\n\tawait bot.send_message(ctx.message.channel, msg)\r\n\treturn",
"async def setlinkrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredLinkRole\", \"\")\r\n\t\t\tmsg = 'Add/remove links now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredLinkRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove links set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)",
"async def setmuterole(self, ctx, *, role = None):\r\n if not await Utils.is_bot_admin_reply(ctx): return\r\n if role:\r\n target_role = DisplayName.roleForName(role, ctx.guild)\r\n if not target_role: return await ctx.send(\"That role doesn't exist - you can create a new mute role with `{}createmuterole [role_name]` though.\".format(ctx.prefix))\r\n try: mute_role = ctx.guild.get_role(int(self.settings.getServerStat(ctx.guild,\"MuteRole\")))\r\n except: mute_role = None\r\n await ctx.send(\"Current mute role: **{}**\".format(Utils.suppressed(ctx,mute_role.name)) if mute_role else \"Currently, there is **no mute role** setup.\")\r\n if role is None:\r\n if mute_role:\r\n await self._ask_perms(ctx,mute_role,desync=True,show_count=True)\r\n self.settings.setServerStat(ctx.guild,\"MuteRole\",None)\r\n return await ctx.send(\"Mute role **removed** - muting will now create overrides per channel!\") if mute_role else None\r\n if mute_role:\r\n if mute_role == target_role:\r\n await ctx.send(\"Target mute role is **the same** as the current!\")\r\n return await self._ask_perms(ctx,target_role,desync=False,show_count=True)\r\n await self._ask_perms(ctx,mute_role,desync=True,show_count=True)\r\n # Got a mute role - let's set the id\r\n await ctx.send(\"Target mute role: **{}**\".format(Utils.suppressed(ctx,target_role.name)))\r\n self.settings.setServerStat(ctx.guild,\"MuteRole\",target_role.id)\r\n await self._ask_perms(ctx,target_role,desync=False,show_count=True)\r\n await ctx.send(\"The mute role has been set to **{}**!\".format(Utils.suppressed(ctx,target_role.name)))",
"async def addrole(self, ctx, role:discord.Role, *users:discord.User):\n if ctx.message.server.me.permissions_in(ctx.message.channel).manage_roles == False:\n await self.bot.say(\"Sorry, I do not have the manage_roles permission\\n**Aborting**\")\n return\n if len(users) == 0:\n await self.bot.say(\":no_entry: You need to specify a user to give the role too.\")\n idk = []\n for user in users:\n await self.bot.add_roles(user, role)\n idk.append(user.name)\n await self.bot.say(\"ok, gave user(s) `\" + \", \".join(idk) + \"` the role {0}\".format(role.name))",
"async def join_role(self, ctx, *, role: discord.Role):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = Data.create_new_data()\n\n Data.server_data[str(ctx.guild.id)][\"join_role\"] = role.id\n await ctx.send(f\"This server's join role has been set to **{role}**\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove a role from the list of roles blocked from sending to the channel.
|
async def tempChannelsDenyRemove(self, ctx: Context, *, role: discord.Role):
async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_DENY)() as roleDeny:
if not roleDeny or role.id not in roleDeny:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Role Deny: "
"**`{0}`** wasn't on the list.".format(role.name)
)
else:
roleDeny.remove(role.id)
self.logger.info(
"%s (%s) removed role %s from the deny list on %s (%s)",
ctx.author.name,
ctx.author.id,
role.name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Role Deny: **`{0}`** "
"removed from the list.".format(role.name)
)
|
[
"async def tempChannelsAllowRemove(self, ctx: Context, *, role: discord.Role):\n async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:\n if not roleAllow or role.id not in roleAllow:\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel - Role Allow: \"\n \"**`{0}`** wasn't on the list.\".format(role.name)\n )\n else:\n roleAllow.remove(role.id)\n self.logger.info(\n \"%s (%s) removed role %s from the allow list on %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n role.name,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":white_check_mark: TempChannel - Role Allow: **`{0}`** \"\n \"removed from the list.\".format(role.name)\n )",
"def removeRole(self, role):\n pass",
"async def setbanned_removerole(self, ctx, *, role):\n server = ctx.message.server\n self.check_server_settings(server)\n server_role = discord.utils.get(server.roles, name=role)\n if server_role is None:\n await self.bot.say(\n '{} is not a valid role on this server.'.format(role))\n return\n self.check_server_settings(server)\n if server_role.id not in self.settings[server.id][\"ROLES\"]:\n await self.bot.say(\n '{} is not on in the list.'.format(role))\n return\n self.settings[server.id][\"ROLES\"].remove(server_role.id)\n await self.bot.say(\n 'Removed {} from list of roles.'.format(role))\n dataIO.save_json(JSON, self.settings)",
"async def _unrequest(self, ctx, role : str):\n\n # attempt to find role that user specied for removal\n auth = ctx.message.author\n serv = ctx.message.guild\n role = dh.get_role(serv, role)\n guild_id = str(serv.id)\n role_id = str(role.id)\n\n # if user failed to find specify role, complain\n if not role:\n await ctx.send('Please specify a valid role')\n return\n\n # get a list of roles that are listed as public and the user roles\n available_roles = self.conf.get(guild_id, {}).get('pub_roles', [])\n user_roles = discord.utils.find(lambda r: str(r.id) == role_id, auth.roles)\n\n # ONLY remove roles if they are in the public roles list\n # Unless there is no list,\n # in which case any of the user's roles can be removed\n if role_id in available_roles or user_roles:\n await auth.remove_roles(role)\n await ctx.send(ok('you no longer have that role'))\n else:\n await ctx.send(error('I\\'m afraid that I can\\'t remove that role'))",
"async def _delmodrole(self, ctx: commands.Context, role: discord.Role):\n async with self.config.guild(ctx.guild).modroles() as modroles:\n if role.id in modroles:\n modroles.remove(role.id)\n await ctx.send(f\"{role} role has been removed.\")\n else:\n await ctx.send(\"That role isn't in the list.\")",
"async def remove(self, ctx, *, user_name: str):\n\n async with self.bot.db.execute(\"SELECT role_id FROM mapset_channels WHERE user_id = ? AND channel_id = ?\",\n [int(ctx.author.id), int(ctx.channel.id)]) as cursor:\n role_id_list = await cursor.fetchone()\n if not role_id_list:\n await ctx.send(\"not your mapset channel\")\n return\n\n member = get_member_helpers.get_member_guaranteed(ctx, user_name)\n if not member:\n await ctx.send(\"No member found with what you specified. Try using a Discord account ID.\")\n return\n\n role = ctx.guild.get_role(int(role_id_list[0]))\n if not role:\n await ctx.reply(\"Looks like the role for this mapset channel no longer exists.\")\n return\n\n try:\n await member.remove_roles(role, reason=\"removed from mapset\")\n await ctx.send(f\"removed {member.mention} from this channel\")\n except discord.Forbidden:\n await ctx.reply(\"I do not have permissions to remove roles.\")",
"def clear(self, role_name):\n self.interactions[:] = [i for i in self.interactions if i.role != role_name]",
"async def _remove(self, ctx: commands.Context, user: discord.Member, role: discord.Role):\n async with self.config.member(user).temp_roles() as user_tr:\n if not (user_tr.get(str(role.id))):\n return await ctx.send(\n f\"That is not an active TempRole for {user.mention}.\",\n allowed_mentions=discord.AllowedMentions.none()\n )\n del user_tr[str(role.id)]\n message = f\"TempRole {role.mention} for {user.mention} has been removed.\"\n await ctx.send(\n message,\n allowed_mentions=discord.AllowedMentions.none()\n )\n await self._maybe_send_log(ctx.guild, message)\n await self._tr_end(user, role, admin=ctx.author)",
"async def on_raw_reaction_remove(self, reaction):\n channel = self.bot.get_channel(reaction.channel_id)\n message = await channel.fetch_message(reaction.message_id)\n\n cached_selfrole_message = next((item for item in self.cached_selfrole_msgs if\n str(item[\"channel_id\"]) == str(channel.id) and str(item[\"message_id\"]) == str(\n message.id)), None)\n\n if message.author.id == self.bot.user.id and cached_selfrole_message is None: # We need to lazy-cache that!\n await self.lazy_cache(reaction.channel_id)\n cached_selfrole_message = next((item for item in self.cached_selfrole_msgs if\n str(item[\"channel_id\"]) == str(channel.id) and str(\n item[\"message_id\"]) == str(\n message.id)), None)\n\n if cached_selfrole_message is None: # if it still is None then skip this event\n return\n\n if cached_selfrole_message:\n values = cached_selfrole_message\n await self.bot.http.remove_role(reaction.guild_id, reaction.user_id, int(values[\"mention_id\"]))\n self.stats_roles_revoked += 1",
"def removes_channel(channel):",
"async def remove(self, ctx, *, role_name):\n found_role = None\n for role in ctx.guild.roles:\n if role.name.lower() == role_name.lower():\n found_role = role\n if found_role:\n try:\n success = await \\\n self.bot.pg_utils.remove_autoassign_role(\n ctx.guild.id, found_role.id, self.bot.logger)\n except ValueError:\n local_embed = discord.Embed(\n title=f'{found_role.name} is already'\n ' not on the auto-assignable list',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n return\n if success:\n local_embed = discord.Embed(\n title=f'Removed {found_role.name} '\n 'from auto-assignable roles',\n description=' ',\n color=0x419400\n )\n else:\n local_embed = discord.Embed(\n title=f'Internal error occured,'\n ' please contact @dashwav#7785',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n else:\n local_embed = discord.Embed(\n title=f'Couldn\\'t find role {role_name}',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)",
"async def _remove(self, ctx: commands.Context, role: discord.Role, *channels: typing.Union[discord.VoiceChannel, discord.CategoryChannel]):\n\n async with self.config.guild(ctx.guild).stream_roles() as settings:\n if existing := settings.get(str(role.id)):\n for c in channels:\n if c.id in existing:\n settings[str(role.id)].remove(c.id)\n if not settings[str(role.id)]:\n del settings[str(role.id)]\n else:\n return await ctx.send(\"There are no VCs or Categories with that StreamRole!\")\n\n return await ctx.tick()",
"async def on_raw_reaction_remove(self, payload):\n\n if payload.message_id != self.target_message_id:\n return\n \n guild = client.get_guild(payload.guild_id)\n member = guild.get_member(payload.user_id)\n\n if payload.emoji.name == '💩':\n role = discord.utils.get(guild.roles, name='Poo Man')\n await member.remove_roles(role)\n elif payload.emoji.name == '💀':\n role = discord.utils.get(guild.roles, name='Dead Man')\n await member.remove_roles(role)\n elif payload.emoji.name == '💪':\n role = discord.utils.get(guild.roles, name='Strong Man')\n await member.remove_roles(role)",
"async def remove_(self, ctx):\n\n # Get a reference to the current guild data\n self.check_guild_data_exists(ctx.guild.id)\n current_guild_data = self.guild_data[ctx.guild.id]\n\n # Fetch the role and user ignores\n ignores = current_guild_data[\"ignores\"]\n\n if len(role_mentions := ctx.message.role_mentions) == 0 and len(user_mentions := ctx.message.mentions) == 0:\n await ctx.send(\"Please provide a user/role to unignore.\")\n return\n\n # List to keep track of role ignores that were removed\n removed_roles = []\n\n # Check which roles to remove, if any\n for role_id in ignores[\"roles\"]:\n ignore_role = ctx.guild.get_role(role_id)\n if ignore_role in role_mentions:\n ignores[\"roles\"].remove(role_id)\n removed_roles.append(ignore_role.mention)\n\n # List to keep track of user ignores that were removed\n removed_users = []\n\n # Check which roles to remove, if any\n for user_id in ignores[\"users\"]:\n ignore_member = ctx.guild.get_member(user_id)\n if ignore_member in user_mentions:\n ignores[\"users\"].remove(user_id)\n removed_users.append(ignore_member.mention)\n\n # Make an embed saying which roles and users were unignored\n removed_embed = discord.Embed(title=\"Removed Ignores\",\n color=discord.Color.red())\n\n removed_role_str = utilities.pretty_print_list(\n removed_roles) or \"No roles unignored.\"\n removed_user_str = utilities.pretty_print_list(\n removed_users) or \"No users unignored.\"\n\n # Add removed ignore fields to embed\n removed_embed.add_field(\n name=\"Roles\", value=removed_role_str, inline=False)\n removed_embed.add_field(\n name=\"Users\", value=removed_user_str, inline=False)\n\n await ctx.send(embed=removed_embed)",
"def remove_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.remove(user)",
"async def remove_roles(self, ctx, message: typing.Union[discord.Message, int]):\n if not ctx.guild_profile.reactions.roles:\n return await ctx.send_line(f\"{ctx.emotes.web_emotion.xx} {ctx.guild.name} has no reactions roles set.\", ctx.guild.icon_url)\n message_id = message.id if isinstance(message, discord.Message) else message\n if message_id not in ctx.guild_profile.reactions.roles:\n return await ctx.send_line(\"❌ That message doesn't contains any reaction roles.\")\n if not await ctx.confirm():\n return\n await ctx.guild_profile.reactions.remove_roles(message_id)\n await ctx.send_line(f\"{ctx.emotes.web_emotion.galka} Reaction roles has been removed for provided message.\")",
"def remove_receiver(self, receiver):\n\n for o in make_iter(receiver):\n try:\n self.senders.remove(o)\n except ValueError:\n pass # nothing to remove",
"async def suspend(message: discord.Message, channel: discord.Channel=Annotate.Self):\n send = channel.overwrites_for(message.server.default_role).send_messages\n print(send, False if send is None else not send)\n overwrite = discord.PermissionOverwrite(send_messages=False if send is None else not send)\n await client.edit_channel_permissions(channel, message.server.default_role, overwrite)\n\n try:\n if overwrite.send_messages:\n await client.say(message, \"{} is no longer suspended.\".format(channel.mention))\n else:\n await client.say(message, \"Suspended {}.\".format(channel.mention))\n except discord.Forbidden: # ...\n await client.send_message(message.author, \"You just removed my send permission in {}.\".format(channel.mention))",
"async def removeadmin(ctx, role : discord.Role = None):\r\n\tisAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\r\n\t# Only allow admins to change server stats\r\n\tif not isAdmin:\r\n\t\tawait bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')\r\n\t\treturn\r\n\r\n\tif role == None:\r\n\t\tmsg = 'Usage: `$removeadmin [role]`'\r\n\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\treturn\r\n\r\n\tif type(role) is str:\r\n\t\ttry:\r\n\t\t\trole = discord.utils.get(message.server.roles, name=role)\r\n\t\texcept:\r\n\t\t\tprint(\"That role does not exist\")\r\n\t\t\treturn\r\n\r\n\t# If we're here - then the role is a real one\r\n\tpromoArray = getServerStat(ctx.message.server, globals.serverList, \"AdminArray\")\r\n\r\n\tfor aRole in promoArray:\r\n\t\t# Get the role that corresponds to the id\r\n\t\tif aRole['ID'] == role.id:\r\n\t\t\t# We found it - let's remove it\r\n\t\t\tpromoArray.remove(aRole)\r\n\t\t\tsetServerStat(ctx.message.server, globals.serverList, \"AdminArray\", promoArray)\r\n\t\t\tmsg = '{} removed successfully.'.format(aRole['Name'])\r\n\t\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\t\treturn\r\n\r\n\t# If we made it this far - then we didn't find it\r\n\tmsg = '{} not found in list.'.format(aRole['Name'])\r\n\tawait bot.send_message(ctx.message.channel, msg)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deletes the temp channel, if it exists.
|
async def tempChannelsDelete(self, ctx: Context):
guildConfig = self.config.guild(ctx.guild)
channelId = await guildConfig.get_attr(KEY_CH_ID)()
channelCreated = await guildConfig.get_attr(KEY_CH_CREATED)()
if channelCreated and channelId:
# Channel created, see when we should delete it.
try:
chanObj = self.bot.get_channel(channelId)
await chanObj.delete()
except discord.DiscordException:
self.logger.error("Could not delete channel!", exc_info=True)
await ctx.send(
":warning: TempChannel: Something went wrong "
"while trying to delete the channel. Please "
"check the console log for details."
)
else:
await guildConfig.get_attr(KEY_CH_ID).set(None)
await guildConfig.get_attr(KEY_CH_CREATED).set(False)
self.logger.info(
"%s (%s) deleted the temp channel #%s (%s) in %s (%s).",
ctx.author.name,
ctx.author.id,
chanObj.name,
chanObj.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(":white_check_mark: TempChannel: Channel deleted")
else:
await ctx.send(
":negative_squared_cross_mark: TempChannel: There is no "
"temporary channel to delete!"
)
|
[
"async def on_channel_delete(self, channel):",
"def test__Channel__delete__2():\n guild_id = 202211090005\n channel_id = 202211090006\n \n guild = Guild.precreate(guild_id)\n \n channel = Channel.precreate(channel_id, channel_type = ChannelType.guild_category, guild_id = guild_id)\n guild.channels[channel_id] = channel\n \n channel._delete(None)\n \n vampytest.assert_not_in(channel_id, guild.channels)",
"def removes_channel(channel):",
"def delete_channel(self, channel_name, project_name, dataset_name):\n url = self.url() + \"/nd/resource/dataset/{}\".format(dataset_name)\\\n + \"/project/{}\".format(project_name) + \\\n \"/channel/{}/\".format(channel_name)\n\n req = self.remote_utils.delete_url(url)\n\n if req.status_code is not 204:\n raise RemoteDataUploadError('Could not delete {}'.format(req.text))\n if req.content == \"\" or req.content == b'':\n return True\n else:\n return False",
"def _delete_temp_folder(self):\n temp = os.path.normpath(self.Pub2SD + '/Temp')\n if os.path.exists(temp):\n shutil.rmtree(temp)\n self.qr.put(('STATUS', \"Deleting old temporary folder.\"))",
"def deleteChannel(self, channelName):\r\n\t\tif self.channelExists(channelName):\r\n\t\t\tself.db(self.db.user_channels.channel_title.lower()==channelName.lower()).delete()",
"def test__Channel__delete__1():\n client_id = 202211090003\n channel_id = 202211090004\n \n client = Client(\n token = 'token_20221209_0001',\n client_id = client_id,\n )\n \n try:\n channel = Channel.precreate(channel_id, channel_type = ChannelType.private_group)\n client.group_channels[channel_id] = channel\n \n channel._delete(client)\n \n vampytest.assert_not_in(channel_id, client.group_channels)\n \n # Cleanup\n finally:\n client._delete()\n client = None",
"async def deleter(self, ctx):\r\n async with self.lock:\r\n channels = await self.conf.all_channels()\r\n sending = \"\"\r\n for c, data in channels.items():\r\n c = self.bot.get_channel(int(c))\r\n if c is None:\r\n continue\r\n if c.guild.id == ctx.guild.id and int(data[\"wait\"]) != 0:\r\n sending += f\"{c.mention}: {data['wait']} seconds\\n\"\r\n if sending:\r\n await ctx.send(sending)\r\n else:\r\n await ctx.send(\r\n f\"No channels are currently being tracked. Add one by using `{ctx.prefix}deleter channel`.\"\r\n )",
"async def channel_delete(self, channel, *, reason = None):\n channel_id = get_channel_id(channel, Channel.is_in_group_guild)\n \n await self.http.channel_delete(channel_id, reason)",
"def channelDelete(self, values_dict, type_id):\n\n url = \"/channels/{0}.xml\".format(values_dict['channelList'])\n parms = {'api_key': self.pluginPrefs.get('apiKey', '')}\n\n response, response_dict = self.sendToThingspeak('delete', url, parms)\n\n if response == 200:\n indigo.server.log(u\"Channel successfully deleted.\".format(response))\n else:\n self.logger.warning(u\"Problem deleting channel data.\")\n\n return True",
"def destroy_channel(self, channel, message=None):\n caller = self.caller\n\n channel_key = channel.key\n if message is None:\n message = (\n f\"|rChannel {channel_key} is being destroyed. \"\n \"Make sure to clean any channel aliases.|n\"\n )\n if message:\n channel.msg(message, senders=caller, bypass_mute=True)\n channel.delete()\n logger.log_sec(f\"Channel {channel_key} was deleted by {caller}\")",
"def channelDestroyed(self, channel):",
"async def channeldelete(self, ctx):\n status = await self.bot.pool.fetch(\"SELECT * FROM loggingsettings WHERE guildid = $1\", ctx.guild.id)\n\n if status[0][\"channel_delete\"] == True:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET channel_delete = $1 WHERE guildid = $2\", False, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned off for channels being deleted.\")\n await ctx.send(embed=embed)\n return\n else:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET channel_delete = $1 WHERE guildid = $2\", True, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned on for channels being deleted.\")\n await ctx.send(embed=embed)",
"def delete_channel(self, channel_id: int):\n delete(self.channels, id=channel_id)",
"async def remove(self, ctx, target_channel: discord.TextChannel):\n if not isinstance(target_channel, discord.TextChannel):\n await ctx.send(\"that is not a valid channel fam\", delete_after=4)\n return\n try:\n message_id = await self.bot.pg_controller.get_message_info(\n ctx.channel.id, target_channel.id)\n except Exception as e:\n await ctx.send(\"something broke\", delete_after=3)\n return\n if not message_id:\n return\n og_message = await ctx.channel.fetch_message(int(message_id))\n\n try:\n # removes the channel watching from the db\n await self.bot.pg_controller.rm_channel_chanreact(target_channel, ctx.channel.id)\n except:\n pass\n try:\n # resets the perms\n await target_channel.edit(sync_permissions=True)\n except:\n pass\n\n for i in range(len(self.bot.chanreact)):\n # removes the channel from the bot cacheing\n if self.bot.chanreact[i]['message_id'] == message_id and \\\n self.bot.chanreact[i]['host_channel'] == ctx.channel.id and \\\n self.bot.chanreact[i]['target_channel'] == target_channel.id:\n del self.bot.chanreact[i]\n break\n\n await og_message.delete()\n await self.bot.pg_controller.rem_channel_message(target_channel.id, ctx.channel.id) # removes the channel for user watching\n await ctx.message.delete()",
"def test__Channel__iter_delete__2():\n guild_id = 202211090022\n channel_id = 202211090023\n \n guild = Guild.precreate(guild_id)\n \n channel = Channel.precreate(channel_id, channel_type = ChannelType.guild_category, guild_id = guild_id)\n guild.channels[channel_id] = channel\n \n channels = {*channel._iter_delete(None)}\n \n vampytest.assert_eq(channels, {channel})\n vampytest.assert_not_in(channel_id, guild.channels)",
"async def _destroy_channel_if_is_empty(self, channel_id):\n if channel_id in self._channels:\n if self._channels[channel_id].is_empty:\n del self._channels[channel_id]",
"async def delchan(self, ctx, channel: typing.Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel], *, reason=\"None given.\"):\n\t\tawait channel.delete(reason=f\"Channel deleted by {ctx.author} ({ctx.author.id}) with reason: {reason}.\")\n\t\ttry: await ctx.send(f\"Deleted `#{channel.name}`!\")\n\t\texcept: await ctx.author.send(f\"Deleted `#{channel.name}`!\")",
"async def delete_channel(self, msg, *, name:str): #Allow Admin/Mod or Creator of that channel delete it\n name = name.replace(\" \",\"-\").lower()\n mod_bool= False\n Roles= self.redis.hgetall(\"{}:Channel:Config\".format(msg.message.server.id))\n Roles= \"{},{}\".format(Roles[\"Admin_Roles\"],Roles[\"Roles\"])\n if name in self.Temp_Chan[msg.message.server.id]:\n for role in msg.message.author.roles:\n print(role)\n if role.name in Roles:\n mod_bool = True\n break\n if msg.message.author.id == self.Temp_Chan[msg.message.server.id][name][\"Creator\"] or mod_bool is True:\n await self.bot.delete_channel(self.Temp_Chan[msg.message.server.id][name][\"Name\"])\n await self.bot.say_edit(\"{} is now delete.\".format(name.replace(\"-\",\" \")))\n else:\n await self.bot.say_edit(\"You do not have right to delete this!\\nYou need to be either creator of {} or mod\".format(name))\n else:\n await self.bot.say_edit(\"{} does not exist! Please double check spelling\".format(name))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loop to check whether or not we should create/delete the TempChannel.
|
async def checkChannels(self): # pylint: disable=too-many-branches,too-many-statements
while self == self.bot.get_cog("TempChannels"):
await asyncio.sleep(SLEEP_TIME)
# Create/maintain the channel during a valid time and duration, else
# delete it.
for guild in self.bot.guilds:
async with self.config.guild(guild).all() as guildData:
try:
if not guildData[KEY_ENABLED]:
continue
if (
int(time.strftime("%H")) == guildData[KEY_START_HOUR]
and int(time.strftime("%M")) == guildData[KEY_START_MIN]
and not guildData[KEY_CH_CREATED]
and not guildData[KEY_CH_ID]
):
# See if ALL of the following is satisfied.
# - It is the starting time.
# - The channel creation flag is not set.
# - The channel ID doesn't exist.
#
# If it is satisfied, let's create a channel, and then
# store the following in the settings:
# - Channel ID.
# - Time to delete channel.
# Start with permissions
# Always allow the bot to read.
permsDict = {self.bot.user: PERMS_READ_Y}
if guildData[KEY_ROLE_ALLOW]:
# If we have allow roles, automatically deny @everyone the "Read
# Messages" permission.
permsDict[guild.default_role] = PERMS_READ_N
for roleId in guildData[KEY_ROLE_ALLOW]:
role = discord.utils.get(guild.roles, id=roleId)
self.logger.debug("Allowed role %s", role)
if role:
permsDict[role] = deepcopy(PERMS_READ_Y)
# Check for deny permissions.
if guildData[KEY_ROLE_DENY]:
for roleId in guildData[KEY_ROLE_DENY]:
role = discord.utils.get(guild.roles, id=roleId)
self.logger.debug("Denied role %s", role)
if role and role not in permsDict.keys():
self.logger.debug("Role not in dict, adding")
permsDict[role] = deepcopy(PERMS_SEND_N)
elif role:
self.logger.debug("Updating role")
permsDict[role].update(send_messages=False)
self.logger.debug("Current permission overrides: \n%s", permsDict)
# Grab parent category. If not set, this will return None anyways.
category = None
if guildData[KEY_CH_CATEGORY]:
category = discord.utils.get(
guild.channels, id=guildData[KEY_CH_CATEGORY]
)
chanObj = await guild.create_text_channel(
guildData[KEY_CH_NAME],
overwrites=permsDict,
category=category,
position=guildData[KEY_CH_POS],
topic=guildData[KEY_CH_TOPIC],
nsfw=guildData[KEY_NSFW],
)
self.logger.info(
"Channel #%s (%s) in %s (%s) was created.",
chanObj.name,
chanObj.id,
guild.name,
guild.id,
)
guildData[KEY_CH_ID] = chanObj.id
# Set delete times, and save settings.
duration = (
guildData[KEY_DURATION_HOURS] * 60 * 60
+ guildData[KEY_DURATION_MINS] * 60
)
guildData[KEY_STOP_TIME] = time.time() + duration
guildData[KEY_CH_CREATED] = True
elif guildData[KEY_CH_CREATED]:
# Channel created, see when we should delete it.
if time.time() >= guildData[KEY_STOP_TIME]:
self.logger.debug(
"Past channel stop time, clearing ID " "and created keys."
)
chanObj = guild.get_channel(guildData[KEY_CH_ID])
guildData[KEY_CH_ID] = None
guildData[KEY_CH_CREATED] = False
if chanObj and guildData[KEY_ARCHIVE]:
await chanObj.set_permissions(
guild.default_role, overwrite=PERMS_READ_N
)
for role in guild.roles:
if role == guild.default_role:
continue
await chanObj.set_permissions(
role, overwrite=None, reason="Archiving tempchannel"
)
currentDate = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
await chanObj.edit(name=f"tc-{currentDate}")
self.logger.info(
"Channel #%s (%s) in %s (%s) was archived.",
chanObj.name,
chanObj.id,
guild.name,
guild.id,
)
elif chanObj and not guildData[KEY_ARCHIVE]:
await chanObj.delete()
self.logger.info(
"Channel #%s (%s) in %s (%s) was deleted.",
chanObj.name,
chanObj.id,
guild.name,
guild.id,
)
except Exception: # pylint: disable=broad-except
self.logger.error(
"Something went terribly wrong for server %s (%s)!",
guild.name,
guild.id,
exc_info=True,
)
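
The loop above relies on a few module-level constants that are not shown in this section: SLEEP_TIME and the PERMS_* permission overwrites. A minimal sketch of what they are assumed to look like follows; the names come from the code, while the exact values are assumptions.

import discord

# Seconds to wait between checks of the start/stop times (assumed value).
SLEEP_TIME = 15

# Let a role (or the bot) read the channel; used for roles on the allow list.
PERMS_READ_Y = discord.PermissionOverwrite(read_messages=True)

# Hide the channel; used for @everyone when an allow list exists, and when archiving.
PERMS_READ_N = discord.PermissionOverwrite(read_messages=False)

# Block sending; used for roles on the deny list.
PERMS_SEND_N = discord.PermissionOverwrite(send_messages=False)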
|
[
"async def tempChannelsDelete(self, ctx: Context):\n guildConfig = self.config.guild(ctx.guild)\n channelId = await guildConfig.get_attr(KEY_CH_ID)()\n channelCreated = await guildConfig.get_attr(KEY_CH_CREATED)()\n\n if channelCreated and channelId:\n # Channel created, see when we should delete it.\n try:\n chanObj = self.bot.get_channel(channelId)\n await chanObj.delete()\n except discord.DiscordException:\n self.logger.error(\"Could not delete channel!\", exc_info=True)\n await ctx.send(\n \":warning: TempChannel: Something went wrong \"\n \"while trying to delete the channel. Please \"\n \"check the console log for details.\"\n )\n else:\n await guildConfig.get_attr(KEY_CH_ID).set(None)\n await guildConfig.get_attr(KEY_CH_CREATED).set(False)\n self.logger.info(\n \"%s (%s) deleted the temp channel #%s (%s) in %s (%s).\",\n ctx.author.name,\n ctx.author.id,\n chanObj.name,\n chanObj.id,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\":white_check_mark: TempChannel: Channel deleted\")\n else:\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel: There is no \"\n \"temporary channel to delete!\"\n )",
"def test_channel_created(self):\n self.assertTrue(Channel.objects.get(name=self.channel))",
"async def tempChannelsToggle(self, ctx: Context):\n guildConfig = self.config.guild(ctx.guild)\n enabled = await guildConfig.get_attr(KEY_ENABLED)()\n if enabled:\n enabled = False\n self.logger.info(\n \"%s (%s) DISABLED the temp channel for %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\":negative_squared_cross_mark: TempChannel: Disabled.\")\n else:\n enabled = True\n self.logger.info(\n \"%s (%s) ENABLED the temp channel for %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\":white_check_mark: TempChannel: Enabled.\")\n await guildConfig.get_attr(KEY_ENABLED).set(enabled)",
"async def test_multiple_closed():\n src = create_channel()\n src.close()\n m = create_multiple(src)\n await asyncio.sleep(0.05)\n ch = create_channel()\n assert not m.add_output(ch)",
"def at_channel_create(self):\n pass",
"def test__Channel__iter_delete__2():\n guild_id = 202211090022\n channel_id = 202211090023\n \n guild = Guild.precreate(guild_id)\n \n channel = Channel.precreate(channel_id, channel_type = ChannelType.guild_category, guild_id = guild_id)\n guild.channels[channel_id] = channel\n \n channels = {*channel._iter_delete(None)}\n \n vampytest.assert_eq(channels, {channel})\n vampytest.assert_not_in(channel_id, guild.channels)",
"def _clean_channel_resource(self, channel_name):\r\n if self.channel_resources.get(channel_name):\r\n self.channel_resources[channel_name].handler.close_thread()\r\n self.channel_resources[channel_name].handler.web_event.set()\r\n self.channel_resources[channel_name].handler.image_event.set()\r\n del self.channel_resources[channel_name]\r\n logging.info(\"clean channel: %s's resource\", channel_name)",
"def test__Channel__delete__2():\n guild_id = 202211090005\n channel_id = 202211090006\n \n guild = Guild.precreate(guild_id)\n \n channel = Channel.precreate(channel_id, channel_type = ChannelType.guild_category, guild_id = guild_id)\n guild.channels[channel_id] = channel\n \n channel._delete(None)\n \n vampytest.assert_not_in(channel_id, guild.channels)",
"def test_channel_create(self):\n channels = Channel.objects.filter(pk=self.channel.id)\n self.assertTrue(channels.exists())\n self.assertEqual(channels.count(), 1)\n self.assertEqual(channels[0].name, self.channel.name)",
"def at_channel_create(self):\r\n pass",
"def test_create_same_channel_thrice(self):\n name: str = \"#rocket2\"\n self.mock_sc.channels_create.return_value = {\"ok\": True,\n \"name\": name}\n assert self.bot.create_channel(name) == name\n try:\n self.mock_sc.channels_create.return_value =\\\n {\"ok\": False, \"error\": \"name_taken\"}\n assert self.bot.create_channel(name) == name\n self.mock_sc.channels_create.return_value =\\\n {\"ok\": False, \"error\": \"invalid_name\"}\n self.bot.create_channel(name)\n except SlackAPIError as e:\n assert e.error == \"invalid_name\"",
"def test_createChannel() -> json:\r\n\r\n # Test data\r\n channel_name = \"Test Channel |+_)(*&^%$#@!~\"\r\n description = \"description _)(*?:%;№\"\r\n read_only = \"false\"\r\n read_only_privacy = \"\"\r\n password = \"\"\r\n languages = \"\"\r\n hash_tags = \"hash_tag1234567890v6dg46s5d4gr6s5dg46s54h6a5d4rg56431m31x\"\r\n geo_tag = \"10.000, 20.000\"\r\n avatar = \"\"\r\n hide_in_ui = \"false\"\r\n status = False\r\n myChannel = ''\r\n\r\n # Action\r\n _, my_channels = u.getChannels(filter=\"\", channel_type=2)\r\n if len(my_channels) < 10:\r\n status, myChannel = u.createChannel(channel_name, description, read_only, read_only_privacy, password,\r\n languages, hash_tags, geo_tag, avatar, hide_in_ui)\r\n time.sleep(3) # wait for uchan database sync ends\r\n u.deleteChannel(myChannel, password) # cleanup step\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, myChannel)\r\n else:\r\n raise Exception(\"There are 10 channels. Cant create more\")",
"async def newtemp(self, ctx, *, name):\n server = ctx.message.server\n perms = ctx.message.server.get_member(\n self.bot.user.id).server_permissions\n\n cname = str(name)\n\n if server.id not in self.settings:\n self.initial_config(server.id)\n\n if perms.manage_channels is False:\n await self.bot.say('I do not have permission to do that')\n elif self.settings[server.id]['toggleactive'] is False:\n await self.bot.say('This command is currently turned off.')\n else:\n channel = await self.bot.create_channel(\n server, cname, type=discord.ChannelType.voice)\n if self.settings[server.id]['toggleowner'] is True:\n overwrite = discord.PermissionOverwrite()\n overwrite.manage_channels = True\n overwrite.manage_roles = True\n await self.bot.edit_channel_permissions(\n channel, ctx.message.author, overwrite)\n self.settings[server.id]['channels'].append(channel.id)\n self.save_json()",
"async def deleter(self, ctx):\r\n async with self.lock:\r\n channels = await self.conf.all_channels()\r\n sending = \"\"\r\n for c, data in channels.items():\r\n c = self.bot.get_channel(int(c))\r\n if c is None:\r\n continue\r\n if c.guild.id == ctx.guild.id and int(data[\"wait\"]) != 0:\r\n sending += f\"{c.mention}: {data['wait']} seconds\\n\"\r\n if sending:\r\n await ctx.send(sending)\r\n else:\r\n await ctx.send(\r\n f\"No channels are currently being tracked. Add one by using `{ctx.prefix}deleter channel`.\"\r\n )",
"def test__Channel__iter_delete__1():\n client_id = 202211090020\n channel_id = 202211090021\n \n client = Client(\n token = 'token_20221209_0003',\n client_id = client_id,\n )\n \n try:\n channel = Channel.precreate(channel_id, channel_type = ChannelType.private_group)\n client.group_channels[channel_id] = channel\n \n channels = {*channel._iter_delete(client)}\n \n vampytest.assert_eq(channels, {channel})\n vampytest.assert_not_in(channel_id, client.group_channels)\n \n # Cleanup\n finally:\n client._delete()\n client = None",
"async def tempChannelsArchive(self, ctx: Context):\n guildConfig = self.config.guild(ctx.guild)\n archiving = await guildConfig.get_attr(KEY_ARCHIVE)()\n if archiving:\n archiving = False\n self.logger.info(\n \"%s (%s) DISABLED archiving the temp channel for %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":negative_squared_cross_mark: TempChannel: Archiving disabled. \"\n \" The channel will be deleted after its lifetime expires.\"\n )\n else:\n archiving = True\n self.logger.info(\n \"%s (%s) ENABLED archiving the temp channel for %s (%s)\",\n ctx.author.name,\n ctx.author.id,\n ctx.guild.name,\n ctx.guild.id,\n )\n await ctx.send(\n \":white_check_mark: TempChannel: Archiving enabled. The channel \"\n \"will have ALL user permissions revoked after its lifetime \"\n \"expires, and will be renamed with the date and time that it \"\n \"was archived.\"\n )\n await guildConfig.get_attr(KEY_ARCHIVE).set(archiving)",
"def test_and_restart_stream(self, ):\n try:\n self.stream.is_active()\n except:\n self.create_stream()",
"def create():\n\n # Get channel name from form\n newChannel = request.form.get(\"channel\")\n\n if newChannel in channelsCreated:\n return render_template(\"error.html\", message=\"that channel already exists!\")\n\n # Add channel to global list of channels\n channelsCreated.append(newChannel)",
"def test_make_and_clean_temp(self):\n c = GitCopier(\"test_source\")\n\n # ensure that the temp directory is created and attributes are set\n c.make_temp()\n assert hasattr(c, \"_temp_dir\")\n temp_dir = c._temp_dir\n assert os.path.isdir(temp_dir)\n assert hasattr(c, \"_temp_dir\")\n\n # ensure that the function is idempotent\n c.make_temp()\n # ensure that the temp directory is the same\n assert temp_dir == c._temp_dir\n assert os.path.isdir(c._temp_dir)\n assert hasattr(c, \"_temp_dir\")\n\n # ensure that the temp directory is removed\n c.clean_temp()\n assert not os.path.isdir(temp_dir)\n assert not hasattr(c, \"_temp_dir\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of sentences, returns a preprocessed list of sentences (very basic preprocessing: lowercasing plus removal of newlines and question marks).
|
def preprocess(list_of_sentences):
    ret_list = []
    for f in list_of_sentences:
        f = f.lower()
        f = f.replace('\n', '')
        f = f.replace('?', '')
        ret_list.append(f)
    return ret_list
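
A quick illustrative check of the behaviour (the input strings below are invented for the example):

# Illustrative usage only; shows the lowercasing and the newline/question-mark removal.
sentences = ["Is this OK?\n", "Second LINE\n"]
print(preprocess(sentences))  # ['is this ok', 'second line']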
|
[
"def _preprocess(sentences, preprocess_pipeline, word_tokenize=None):\n if preprocess_pipeline is not None:\n for function in preprocess_pipeline:\n sentences = function(sentences)\n\n if word_tokenize is None:\n return sentences\n else:\n return sentences, [word_tokenize(sentence) for sentence in sentences]",
"def preprocess_sentences(sentences, vocab):\n # Add sentence boundaries, canonicalize, and handle unknowns\n words = flatten([\"<s>\"] + s + [\"</s>\"] for s in sentences)\n words = [canonicalize_word(w, wordset=vocab.word_to_id)\n for w in words]\n return np.array(vocab.words_to_ids(words))",
"def _preprocess_sentences(sentences_by_chapters: Sequence[Tuple[str, Sequence[str]]]) \\\n -> Sequence[Tuple[str, Sequence[Sequence[str]]]]:\n processed_words_by_chapters = []\n for header, sentences in sentences_by_chapters:\n # Process every chapter by going through its sentences\n processed_words_by_chapter = []\n for sentence in sentences:\n # Process every sentence within the chapter into a list of words\n processed_sentence = _preprocess_text(sentence)\n processed_words_by_chapter.append(processed_sentence)\n processed_words_by_chapters.append((header, processed_words_by_chapter))\n return processed_words_by_chapters",
"def preprocess_dataset(dataset):\n return [preprocess(document) for document in dataset]",
"def split_into_sentences(text):\n if \".)\" in text: text = text.replace(\".)\", \"<prd>)\")\n sentences = text.split(\".\")\n text = text.replace(\"<prd>\", \".\")\n for s in sentences:\n s = s.replace(\"<prd>\", \".\")\n return sentences",
"def data_to_conll(sentences):\n new_sentences = []\n for sentence in sentences:\n tags = [tup[-1] for tup in sentence]\n new_tags = tags_to_conll(tags)\n new_sentences.append([\n tup[:-1] + [tag] for tup, tag in zip(sentence, new_tags)\n ])\n return new_sentences",
"def preprocess(corpus):\n corpus = [item.rstrip() for article in corpus for item in article]\n corpus = [item for item in corpus if item != \"\"]\n return corpus",
"def sentence_tokenize(self, text_list):\n return [sent_tokenize(text, language=self.lang) for text in text_list]",
"def preprocess_text_spacy(\n texts,\n n_jobs=1,\n batch_size=100\n):\n texts = [preprocess(t) for t in texts]\n return list(get_spacy_parse(texts, batch_size=batch_size, n_threads=n_jobs))",
"def sentence_pre_processing(raw_sentence):\n words = np.asarray(word_tokenize(raw_sentence.lower())) # lower case and tokenization\n punctuation_removed = map(remove_punctuation, words) # remove punctuation\n stopwords_filtered = filter(lambda word: word not in ALL_STOPWORDS, punctuation_removed) # stop word removal\n return np.asarray(list(filter(is_alphanumeric, stopwords_filtered))) # remove non-alphanumeric words",
"def clean(sentences: list) -> list:\n cleaned = [sentence.replace(\"\\n\", \" \") for sentence in sentences]\n return cleaned",
"def sentence_tokenize(input_text):\n sent_lst = []\n sent_pipe = PARSER.create_pipe(\"sentencizer\")\n PARSER.add_pipe(sent_pipe)\n doc = PARSER(input_text)\n for sent in doc.sents:\n sent_lst.append(sent.text)\n return sent_lst",
"def prep_text(mission):\n sentences = nltk.sent_tokenize(mission)\n sentences = [nltk.word_tokenize(sent) for sent in sentences]\n return sentences",
"def sentence_extractor(self):\n self.text_sentences = []\n for text in self.texts:\n sentences = nltk.sent_tokenize(text)\n tokens_sentences = []\n for sentence in sentences:\n # tokens = nltk.word_tokenize(sentence)\n tokens = GetNounPhrases(sentence)\n if self.text_cleaner is not None:\n tokens = self.text_cleaner(tokens)\n if self.stem_words:\n tokens = stem_words(tokens)\n \n tokens_sentences.append(tokens)\n self.text_sentences.append(tokens_sentences)",
"def sentencePreProcess(path, congruency=None, beat_type=None, extraction=None, check_beat=None):\n output_list = []\n with open(path, 'r') as f: #open stimuli file as object \n rawText = f.readlines()\n\n if beat_type == 'binary' and congruency == 'congruent':\n sent_offset = 7\n elif beat_type == 'binary' and congruency == 'incongruent1':\n sent_offset = 8\n elif beat_type == 'ternary' and congruency == 'congruent':\n sent_offset = 11 \n elif beat_type == 'ternary' and congruency == 'incongruent1':\n sent_offset = 12\n elif beat_type == 'ternary' and congruency == 'incongruent2':\n sent_offset = 10\n elif congruency == 'neutral':\n sent_offset = 8\n else:\n sent_offset = None\n\n # seperate the individual words and then turn underscore into spaces\n for sent_idx, line in enumerate(rawText): # iterate over lines in raw \n sentence = line[:].replace('\\n', '') # getting rid of the line break thing\n sentence = sentence.split(' ') # splitting the sentence up by spaces\n for word_idx, word in enumerate(sentence[:]): # iterate over words\n sentence[word_idx] = word.replace('_', ' ') # cleaning off the underscore and turning it into space\n stim_data = {'sent_stim':sentence, 'beat_type':beat_type, \n 'congruency':congruency, 'extraction': extraction, 'sent_number': sent_idx, \n 'check_beat': check_beat, 'trial_type': 'main', 'sent_offset': sent_offset,}\n output_list.append(stim_data)\n\n return output_list",
"def sentence_extractor(self):\n self.text_sentences = []\n for text in self.texts:\n sentences = nltk.sent_tokenize(text)\n tokens_sentences = []\n for sentence in sentences:\n tokens = nltk.word_tokenize(sentence)\n if self.text_cleaner is not None:\n tokens = self.text_cleaner(tokens)\n if self.stem_words:\n tokens = stem_words(tokens)\n \n tokens_sentences.append(tokens)\n self.text_sentences.append(tokens_sentences)",
"def tag_tokenized_sentences(self, sentences):\n return [self.tag_tokenized(sentence) for sentence in sentences]",
"def simplify(self):\n #c = 0\n simp_sentences = []\n for s in self.sentences:\n\n #print \"Original: \" + s\n \n simp_sentences.append(self.transformation(s, ''))\n\n ## for demonstration purposes only. remove the prints later\n #print \"Simplified: \",\n #print simp_sentences[c]\n #c+=1\n\n #print \n return simp_sentences",
"def tokenize_with_preprocess(text):\n return map(__stemmer.stem, filter(lambda w: w not in stop,\n nltk.word_tokenize(re.sub(_punc_pattern, '', text.lower()))))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Inserts a general power value into the `power_provenance` table.
|
def insert_power(self, description, the_value):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO power_provenance(
description, the_value)
VALUES(?, ?)
""", [description, the_value])
|
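As a minimal sketch only (the surrounding class is not shown in the snippet above): a `transaction()` context manager of the kind these provenance writers rely on could be backed by sqlite3 roughly as below. The class name, the in-memory database and the table definition are illustrative assumptions, not the actual implementation.

# Hypothetical sketch of a provenance writer with the same transaction()/insert_power
# shape as above, backed here by sqlite3 purely for illustration.
import sqlite3
from contextlib import contextmanager


class SketchProvenanceWriter:
    def __init__(self, database_file=":memory:"):
        self._db = sqlite3.connect(database_file)
        self._db.execute(
            "CREATE TABLE IF NOT EXISTS power_provenance("
            "description TEXT, the_value REAL)")

    @contextmanager
    def transaction(self):
        # Yields a cursor; commits on success, rolls back on error.
        cur = self._db.cursor()
        try:
            yield cur
            self._db.commit()
        except Exception:
            self._db.rollback()
            raise
        finally:
            cur.close()

    def insert_power(self, description, the_value):
        with self.transaction() as cur:
            cur.execute(
                "INSERT INTO power_provenance(description, the_value) "
                "VALUES(?, ?)", [description, the_value])


# Example call with made-up values:
writer = SketchProvenanceWriter()
writer.insert_power("Total_energy_used_J", 12.5)
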
[
"def insert_core(self, x, y, p, description, the_value):\n with self.transaction() as cur:\n core_id = self._get_core_id(cur, x, y, p)\n cur.execute(\n \"\"\"\n INSERT INTO core_provenance(\n core_id, description, the_value)\n VALUES(?, ?, ?)\n \"\"\", [core_id, description, the_value])",
"def insert_monitor(self, x, y, description, the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO monitor_provenance(\n x, y, description, the_value)\n VALUES(?, ?, ?, ?)\n \"\"\", [x, y, description, the_value])",
"def insert_gatherer(self, x, y, address, bytes_read, run, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO gatherer_provenance(\n x, y, address, bytes, run, description, the_value)\n VALUES(?, ?, ?, ?, ?, ?, ?)\n \"\"\", [x, y, address, bytes_read, run, description, the_value])",
"def voting_power(self, voting_power):\n\n self._voting_power = voting_power",
"def insert_connector(\n self, pre_population, post_population, the_type, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT OR IGNORE INTO connector_provenance(\n pre_population, post_population, the_type, description,\n the_value)\n VALUES(?, ?, ?, ?, ?)\n \"\"\",\n [pre_population, post_population, the_type, description,\n the_value])",
"def set_power(self, power):\n pass",
"def insert_product(self, data):\n query = \"INSERT INTO Products VALUES (NULL, %s, %s, %s, %s, %s)\"\n self.mycursor.execute(query, data)\n self.connector.commit()",
"def insert_coin(self, val, label_inserted_money):\n self.vending_machine.insert_coin(val)\n self.print_inserted_money(label_inserted_money)",
"def insert_sig_point_into_db(self, connection, cursor):\n\n cursor.execute(\"\"\"INSERT INTO sig_waypoint \n (ctry_iso3,\n wpt_ident,\n lon_src,\n lat_src,\n location)\n VALUES (%s, %s, %s, %s,\n ST_GeomFromText('POINT(%s %s)', 4326));\"\"\",\n (self.ctry_iso3,\n self.wpt_ident,\n self.lon_src,\n self.lat_src,\n self.lon_dd,\n self.lat_dd))\n connection.commit()",
"def set_power(self, power):\r\n self._power = power",
"def test_power(val: Union[Val, Real], power: Union[Val, Real], expected: Val):\n assert dataclasses.astuple(val ** power) == pytest.approx(dataclasses.astuple(expected))",
"def insert_properties_data(self, cursor, record):\n corrupt = False\n values = \" VALUES (\\'%s\\'\" % record.get(\"zone\")\n if record.get('sub_zone') is None:\n values += \", NULL\"\n else:\n values += \", \\'%s\\'\" % record.get(\"sub_zone\")\n if record.get('wave_exp') is None:\n values += \", NULL\"\n else:\n values += \", \\'%s\\'\" % record.get(\"wave_exp\")\n values += \")\"\n query = \"\"\"INSERT INTO `cnx_logger_properties`\n (`zone`, `sub_zone`, `wave_exp`)\"\"\" + values\n try:\n res = cursor.execute(query)\n except MySQLdb.Error:\n res = 0\n if res == 1:\n pass\n else:\n corrupt = True\n return cursor.lastrowid, corrupt",
"def test_insert_dict(self):\n value = {\"value1\": 56}\n temperature = temperature_record.TempTracer()\n result = temperature.insert(value)\n self.assertFalse(result)",
"def test_log_phenotypes(self):\n np.testing.assert_array_equal(self.binary.log.phenotypes, np.log10(self.phenotypes))",
"def set_cp(self, power):\n self.load_off()\n time.sleep(0.1)\n self._ser.write(f'POW {power:.5f}')\n self._ser.write('OUTP ON')\n time.sleep(0.1)\n print(f'{self._name} CP LOAD: {power:.5f}W\\n')",
"def insert_expenses(expense_type, cost):\n insert_expense_command = \"\"\"insert into {} (Expense_Type, Expense_Amount) values (?,?)\"\"\".format(current_month)\n insert_expense_name = expense_type\n insert_expense_amt = cost\n multi_expense_insert = insert_expense_name, insert_expense_amt\n conn.execute(insert_expense_command, multi_expense_insert)\n conn.execute(\"commit;\")",
"def setPower(self,value):\r\n if value>self.powmax:\r\n print '!!!!!!!!!!!!!!!\\n!!! POWER TOO HIGH !!!\\n!!!!!!!!!!!!!!!!'\r\n else:\r\n self.ctrl.write(\"*CLS;POW %s dbm\" %value)",
"def test_insert_number(self):\n value = 12\n temperature = temperature_record.TempTracer()\n result = temperature.insert(value)\n self.assertTrue(result)",
"def insertStoreEntry(self,store_id,sku,price,exist,location):\n cursor = self.mydb.cursor()\n query = \"INSERT INTO Walmart{} (sku, Price, availability, location) VALUES ('{}',{},{},'{}')\".format(store_id,sku,price,exist,location)\n print(\"Successfully inserted Walmart{} with SKU={} entry\".format(store_id,sku))\n cursor.execute(query)\n cursor.close()\n self.mydb.commit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Records provenance into the `gatherer_provenance` table.
|
def insert_gatherer(self, x, y, address, bytes_read, run, description,
the_value):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO gatherer_provenance(
x, y, address, bytes, run, description, the_value)
VALUES(?, ?, ?, ?, ?, ?, ?)
""", [x, y, address, bytes_read, run, description, the_value])
|
[
"def add_provenance(self, source_field, term, notification_field, matched, explanation):\n uc = dataobj.to_unicode()\n obj = {\n \"source_field\" : self._coerce(source_field, uc),\n \"term\" : self._coerce(term, uc),\n \"notification_field\" : self._coerce(notification_field, uc),\n \"matched\" : self._coerce(matched, uc),\n \"explanation\" : self._coerce(explanation, uc)\n }\n self._add_to_list(\"provenance\", obj)",
"def graph_provenance_gatherer():\n progress = ProgressBar(\n FecDataView.get_n_vertices() +\n FecDataView.get_n_partitions(),\n \"Getting provenance data from application graph\")\n for vertex in progress.over(FecDataView.iterate_vertices(), False):\n if isinstance(vertex, AbstractProvidesLocalProvenanceData):\n vertex.get_local_provenance_data()\n for m_vertex in vertex.machine_vertices:\n if isinstance(m_vertex, AbstractProvidesLocalProvenanceData):\n m_vertex.get_local_provenance_data()\n for partition in progress.over(\n FecDataView.iterate_partitions()):\n for edge in partition.edges:\n if isinstance(edge, AbstractProvidesLocalProvenanceData):\n edge.get_local_provenance_data()",
"def set_provenance(self, network_id, provenance):\n self._require_auth()\n route = \"/network/%s/provenance\" % network_id\n if isinstance(provenance, dict):\n put_json = json.dumps(provenance)\n else:\n put_json = provenance\n return self.put(route, put_json)",
"def provenance(self) -> Dict:\n import fragmenter\n import openeye\n\n return {\"fragmenter\": fragmenter.__version__, \"openeye\": openeye.__version__}",
"def get_provenance(self, network_id):\n route = \"/network/%s/provenance\" % network_id\n return self.get(route)",
"def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('jdbrawn_jliang24_slarbi_tpotye', 'jdbrawn_jliang24_slarbi_tpotye')\n\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n doc.add_namespace('bdp', 'https://data.boston.gov/api/action/datastore_search?resource_id=')\n doc.add_namespace('591', 'http://datamechanics.io/data/jdbrawn_jliang24_slarbi_tpotye/')\n doc.add_namespace('bdp1', 'https://data.cityofboston.gov/resource/')\n\n this_script = doc.agent('alg:jdbrawn_jliang24_slarbi_tpotye#newStations', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n\n resource_safetyScore = doc.entity('dat:jdbrawn_jliang24_slarbi_tpotye#safetyScore', {'prov:label': 'Safety Scores', prov.model.PROV_TYPE: 'ont:DataSet'})\n resource_colleges = doc.entity('dat:jdbrawn_jliang24_slarbi_tpotye#colleges', {'prov:label': 'Boston Universities and Colleges', prov.model.PROV_TYPE: 'ont:DataSet'})\n\n\n get_newStations = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n\n doc.wasAssociatedWith(get_newStations, this_script)\n\n doc.usage(get_newStations, resource_safetyScore, startTime, None, {prov.model.PROV_TYPE: 'ont:Computation'})\n doc.usage(get_newStations, resource_colleges, startTime, None, {prov.model.PROV_TYPE: 'ont:Computation'})\n\n newLocation = doc.entity('dat:jdbrawn_jliang24_slarbi_tpotye#newStations', {prov.model.PROV_LABEL: 'New Police Stations', prov.model.PROV_TYPE: 'ont:DataSet'})\n \n doc.wasAttributedTo(newLocation, this_script)\n doc.wasGeneratedBy(newLocation, get_newStations, endTime)\n doc.wasDerivedFrom(newLocation, resource_safetyScore, get_newStations, get_newStations, get_newStations)\n doc.wasDerivedFrom(newLocation, resource_colleges, get_newStations, get_newStations, get_newStations)\n\n repo.logout()\n\n return doc",
"def __fetchProvenance(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath)\n pD = provU.fetch()\n return pD[self.__provKeyName] if self.__provKeyName in pD else {}\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def record_snapshots(self, population):\n if pan.skip(pan.SNAPSHOT_RATE_) or len(population) == 0:\n return\n\n # genotypes\n df_gen = pd.DataFrame(np.array(population.genomes.reshape(len(population), -1)))\n df_gen.reset_index(drop=True, inplace=True)\n df_gen.columns = [str(c) for c in df_gen.columns]\n df_gen.to_feather(self.paths[\"snapshots_genotypes\"] / f\"{pan.stage}.feather\")\n\n # phenotypes\n df_phe = pd.DataFrame(np.array(population.phenotypes))\n df_phe.reset_index(drop=True, inplace=True)\n df_phe.columns = [str(c) for c in df_phe.columns]\n df_phe.to_feather(self.paths[\"snapshots_phenotypes\"] / f\"{pan.stage}.feather\")\n\n # demography\n dem_attrs = [\"ages\", \"births\", \"birthdays\"]\n demo = {attr: getattr(population, attr) for attr in dem_attrs}\n df_dem = pd.DataFrame(demo, columns=dem_attrs)\n df_dem.reset_index(drop=True, inplace=True)\n df_dem.to_feather(self.paths[\"snapshots_demography\"] / f\"{pan.stage}.feather\")",
"def insert_monitor(self, x, y, description, the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO monitor_provenance(\n x, y, description, the_value)\n VALUES(?, ?, ?, ?)\n \"\"\", [x, y, description, the_value])",
"def save_provenance(results_dir, parser):\n results_dir = os.path.join(results_dir, \"prov\")\n if not os.path.exists(results_dir):\n os.makedirs(results_dir, 0o755)\n\n # Create a PHP file to list the contents of the prov dir.\n php_path = os.path.join(results_dir, \"index.php\")\n with open(php_path, \"w\") as f:\n contents = \"\"\"\n <?php\n # Taken from:\n # https://stackoverflow.com/questions/3785055/how-can-i-create-a-simple-index-html-file-which-lists-all-files-directories\n $path = \".\";\n $dh = opendir($path);\n $i=1;\n while (($file = readdir($dh)) !== false) {\n if($file != \".\" && $file != \"..\" && $file != \"index.php\" && $file != \".htaccess\" && $file != \"error_log\" && $file != \"cgi-bin\") {\n echo \"<a href='$path/$file'>$file</a><br /><br />\";\n $i++;\n }\n }\n closedir($dh);\n ?>\n \"\"\"\n f.write(contents)\n try:\n _save_env_yml(results_dir)\n except Exception:\n traceback.print_exc()\n\n _save_parameter_files(results_dir, parser)\n\n _save_python_script(results_dir, parser)",
"def insert_report(self, message):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO reports(message)\n VALUES(?)\n \"\"\", [message])\n recorded = cur.lastrowid\n cutoff = get_config_int(\"Reports\", \"provenance_report_cutoff\")\n if cutoff is None or recorded < cutoff:\n logger.warning(message)\n elif recorded == cutoff:\n logger.warning(f\"Additional interesting provenance items in \"\n f\"{self._database_file}\")",
"def annotate_record(self, record, variant_result):\n record.INFO['variant_id'] = variant_result.variant_id\n record.INFO['gene'] = \",\".join(variant_result.genes)\n record.INFO['gnomad_exomes_AF'] = variant_result.gnomad_exomes_af\n record.INFO['gnomad_genomes_AF'] = variant_result.gnomad_genomes_af\n record.ALT = variant_result.alt\n record.POS = variant_result.pos\n record.ID = \";\".join(variant_result.rs_ids) or \".\"\n return record",
"def populate_donorsummary_table():\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('donor.db')\n \n logger.info('working with donor summary table')\n\n DONOR_NAME = 0\n SUM_DONATIONS = 1\n COUNT_DONATIONS = 2\n\n donors = [\n ('Chris Cornell', 115000.00, 2),\n ('Kim Thayil', 560000.00, 2)\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for donor in donors:\n avg_don = donor[SUM_DONATIONS]/donor[COUNT_DONATIONS]\n\n with database.transaction():\n new_donor = DonorSummary.create(\n donor_name = donor[DONOR_NAME],\n sum_donations = donor[SUM_DONATIONS],\n count_donations = donor[COUNT_DONATIONS],\n average_donations = avg_don)\n new_donor.save()\n logger.info('Database add successful')\n\n logger.info('Print the donor records we saved')\n for saved_donor in DonorSummary:\n logger.info(f'''{saved_donor.donor_name}, \n sum donations: {saved_donor.sum_donations}, \n count_donations: {saved_donor.count_donations},\n average_donations: {saved_donor.average_donations}''')\n\n except Exception as e:\n logger.info(f'Error creating = {donor[DONOR_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()",
"def added_to_village(profile, added_by, student):\n triggering = profile.notify_added_to_village\n data = {'added-by-id': added_by.id, 'student-id': student.id}\n _record(profile, types.ADDED_TO_VILLAGE, triggering=triggering, data=data)",
"def provenance_xml_file(self):\n return self._get_derived_path('_provenance', '.xml')",
"def save_process_table(self):\n if self.procs != None:\n f = open(self.proctable_path, 'w')\n f.write(self.processtable_header)\n for id in self.procs.keys():\n proc = self.procs[id]\n f.write(self.processtable_line % (id, proc['product'], proc['product_energy'],\n proc['time']))\n f.close()",
"def postprocess(self):\n log.info('Starting post-processor for evidence {0:s}'.format(self.name))\n log.debug('Evidence state: {0:s}'.format(self.format_state()))\n self._postprocess()\n if self.parent_evidence:\n self.parent_evidence.postprocess()",
"def describe_source_record(self):",
"def get_provenance_record(caption: str, ancestors: list):\n record = {\n 'caption':\n caption,\n 'domains': ['reg'],\n 'authors': [\n 'kalverla_peter',\n 'smeets_stef',\n 'brunner_lukas',\n 'camphuijsen_jaro',\n ],\n 'references': [\n 'brunner2019',\n 'lorenz2018',\n 'knutti2017',\n ],\n 'ancestors':\n ancestors,\n }\n return record"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Inserts data into the `monitor_provenance` table.
|
def insert_monitor(self, x, y, description, the_value):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO monitor_provenance(
x, y, description, the_value)
VALUES(?, ?, ?, ?)
""", [x, y, description, the_value])
|
[
"def insert_board_provenance(self, connections):\n if not connections:\n return\n with self.transaction() as cursor:\n cursor.executemany(\n \"\"\"\n INSERT OR IGNORE INTO boards_provenance(\n ethernet_x, ethernet_y, ip_addres)\n VALUES (?, ?, ?)\n \"\"\", ((x, y, ipaddress)\n for ((x, y), ipaddress) in connections.items()))",
"def insert_gatherer(self, x, y, address, bytes_read, run, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO gatherer_provenance(\n x, y, address, bytes, run, description, the_value)\n VALUES(?, ?, ?, ?, ?, ?, ?)\n \"\"\", [x, y, address, bytes_read, run, description, the_value])",
"def insert_connector(\n self, pre_population, post_population, the_type, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT OR IGNORE INTO connector_provenance(\n pre_population, post_population, the_type, description,\n the_value)\n VALUES(?, ?, ?, ?, ?)\n \"\"\",\n [pre_population, post_population, the_type, description,\n the_value])",
"def insert_report(self, message):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO reports(message)\n VALUES(?)\n \"\"\", [message])\n recorded = cur.lastrowid\n cutoff = get_config_int(\"Reports\", \"provenance_report_cutoff\")\n if cutoff is None or recorded < cutoff:\n logger.warning(message)\n elif recorded == cutoff:\n logger.warning(f\"Additional interesting provenance items in \"\n f\"{self._database_file}\")",
"def insert_plant_history_record_list(plant_list):\n sql = \"\"\" INSERT INTO plant_history(plant_id, humidity, time_recorded) VALUES (%s,%s,%s)\"\"\"\n conn = None\n try:\n # read database configuration\n params = config()\n # connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # create a new cursor\n cur = conn.cursor()\n # execute the INSERT statement\n cur.executemany(sql,plant_list)\n # commit the changes to the database\n conn.commit()\n # close communication with the database\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()",
"def metadataInsert(self, data, md5=False, db=None):\n\n localClose = False\n if not db:\n db = self.dbConnect()\n localClose = True\n cursor = db.cursor()\n\n for obj in data:\n md5 = files.fileMD5(obj[\"object_path\"])\n\n sql = (\n \"INSERT INTO %s.PLADMIN_METADATA VALUES('%s', '%s', '%s','%s', sysdate, TO_DATE('%s','RRRR/MM/DD HH24:MI:SS'))\"\n % (\n self.user,\n obj[\"object_name\"],\n obj[\"object_type\"],\n obj[\"object_path\"],\n md5,\n obj[\"last_ddl_time\"],\n )\n )\n cursor.execute(sql)\n\n cursor.close()\n\n if localClose:\n db.commit()\n db.close()",
"def test_do_insert(test_dao):\n DUT = dtmHazardAnalysis(test_dao)\n DUT.do_select_all(revision_id=1)\n\n _error_code, _msg = DUT.do_insert(revision_id=1, hardware_id=2)\n\n assert _error_code == 0\n assert _msg == (\"RAMSTK SUCCESS: Adding one or more items to the RAMSTK \"\n \"Program database.\")\n assert DUT.last_id == 9",
"def insert_properties_data(self, cursor, record):\n corrupt = False\n values = \" VALUES (\\'%s\\'\" % record.get(\"zone\")\n if record.get('sub_zone') is None:\n values += \", NULL\"\n else:\n values += \", \\'%s\\'\" % record.get(\"sub_zone\")\n if record.get('wave_exp') is None:\n values += \", NULL\"\n else:\n values += \", \\'%s\\'\" % record.get(\"wave_exp\")\n values += \")\"\n query = \"\"\"INSERT INTO `cnx_logger_properties`\n (`zone`, `sub_zone`, `wave_exp`)\"\"\" + values\n try:\n res = cursor.execute(query)\n except MySQLdb.Error:\n res = 0\n if res == 1:\n pass\n else:\n corrupt = True\n return cursor.lastrowid, corrupt",
"def insert_power(self, description, the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO power_provenance(\n description, the_value)\n VALUES(?, ?)\n \"\"\", [description, the_value])",
"def insert_router(\n self, x, y, description, the_value, expected=True):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO router_provenance(\n x, y, description, the_value, expected)\n VALUES(?, ?, ?, ?, ?)\n \"\"\", [x, y, description, the_value, expected])",
"def insert(connection, row_data):\n cur = connection.cursor()\n cur.execute(\"INSERT INTO pomodoros VALUES (?,?,?,?)\", row_data)",
"def insert_core(self, x, y, p, description, the_value):\n with self.transaction() as cur:\n core_id = self._get_core_id(cur, x, y, p)\n cur.execute(\n \"\"\"\n INSERT INTO core_provenance(\n core_id, description, the_value)\n VALUES(?, ?, ?)\n \"\"\", [core_id, description, the_value])",
"def insert_audit(model, dataset_id, measure, value, user_name):\n\n try:\n conn = psycopg2.connect(user='basic',\n password=os.environ['database_password'],\n host='127.0.0.1',\n port='5432',\n database='modelmetadata')\n\n cur = conn.cursor()\n\n # query for audit insertion\n query = \"\"\"insert into audits (model_name, dataset_id, measure, value, user_name) values\n\t\t\t\t(%s, %s, %s, %s, %s)\"\"\"\n\n # execution of the query\n cur.execute(query, (model, dataset_id, measure, float(value), user_name))\n\n # commit\n conn.commit()\n\n result = True\n\n except (Exception, psycopg2.Error) as error:\n print(\"Error while connecting to PostgreSQL\", error)\n result = False\n finally:\n # closing database connection.\n if (conn):\n cur.close()\n conn.close()\n return result",
"def add_provenance(self, source_field, term, notification_field, matched, explanation):\n uc = dataobj.to_unicode()\n obj = {\n \"source_field\" : self._coerce(source_field, uc),\n \"term\" : self._coerce(term, uc),\n \"notification_field\" : self._coerce(notification_field, uc),\n \"matched\" : self._coerce(matched, uc),\n \"explanation\" : self._coerce(explanation, uc)\n }\n self._add_to_list(\"provenance\", obj)",
"def insert_row(self, data):\n print(\"Inserting row to database\")\n self.cursor.executemany(self.insert_query, data)\n self.connection.commit()",
"def insert_data(self, data:dict,):\n \n assert(isinstance(data, dict))\n field, value = \"\", \"\"\n for key in data.keys():\n field += key + ', '\n value += ':' + key + ', '\n \n field = field[:-2]\n value = value[:-2]\n sql_query = \"\"\"INSERT INTO %s (%s) VALUES(%s)\"\"\"%(self.db, field, value)\n self.conn.execute(sql_query, data)\n self.conn.commit()",
"def insert(self, data):\n if '_rev' in data:\n self.__not_opened()\n raise PreconditionsException(\n \"Can't add record with forbidden fields\")\n _rev = self.create_new_rev()\n if not '_id' in data:\n try:\n _id = self.id_ind.create_key()\n except:\n self.__not_opened()\n raise DatabaseException(\"No id?\")\n else:\n _id = data['_id']\n assert _id is not None\n data['_rev'] = _rev # for make_key_value compat with update / delete\n data['_id'] = _id\n self._insert_indexes(_rev, data)\n ret = {'_id': _id, '_rev': _rev}\n data.update(ret)\n return ret",
"def _insert_in_tmp_share_table(data):\n description = data['description']\n values = (\n data['database_code'] + \"/\" + data['dataset_code'],\n data['name'],\n _extract_isin(description),\n \"Quandl\",\n _extract_currency(description),\n description,\n data['oldest_available_date'],\n data['newest_available_date'])\n\n sql = 'INSERT INTO \"TmpShare\" VALUES (%s, %s, %s, %s, %s, %s, %s, %s)'\n _connection.execute(sql, values)",
"def __insert_into_database(request_data: list, predictions: list) -> None:\n try:\n db_connection = __connect()\n cur = db_connection.cursor()\n try:\n date = datetime.now()\n data_joined = []\n\n # Joining data as tuples\n for input, predict in zip(request_data, predictions):\n row_data = (date, f\"{input}\", predict)\n data_joined.append(row_data)\n\n # Inserting data as a batch into database\n insert_query = \"insert into history (date,features,prediction) values %s\"\n psycopg2.extras.execute_values(\n cur, insert_query, data_joined, template=None, page_size=100\n )\n except:\n print(\"Couldn't insert values\")\n db_connection.close()\n except:\n print(\"Couldn't connect to database\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Inserts data into the `router_provenance` table.
|
def insert_router(
self, x, y, description, the_value, expected=True):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO router_provenance(
x, y, description, the_value, expected)
VALUES(?, ?, ?, ?, ?)
""", [x, y, description, the_value, expected])
|
[
"def insert_board_provenance(self, connections):\n if not connections:\n return\n with self.transaction() as cursor:\n cursor.executemany(\n \"\"\"\n INSERT OR IGNORE INTO boards_provenance(\n ethernet_x, ethernet_y, ip_addres)\n VALUES (?, ?, ?)\n \"\"\", ((x, y, ipaddress)\n for ((x, y), ipaddress) in connections.items()))",
"def insert_gatherer(self, x, y, address, bytes_read, run, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO gatherer_provenance(\n x, y, address, bytes, run, description, the_value)\n VALUES(?, ?, ?, ?, ?, ?, ?)\n \"\"\", [x, y, address, bytes_read, run, description, the_value])",
"def insert_monitor(self, x, y, description, the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO monitor_provenance(\n x, y, description, the_value)\n VALUES(?, ?, ?, ?)\n \"\"\", [x, y, description, the_value])",
"def insert_connector(\n self, pre_population, post_population, the_type, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT OR IGNORE INTO connector_provenance(\n pre_population, post_population, the_type, description,\n the_value)\n VALUES(?, ?, ?, ?, ?)\n \"\"\",\n [pre_population, post_population, the_type, description,\n the_value])",
"def insert_parsetree(self, parsetree):\n self.execute(\"INSERT INTO parsetrees \\\n (parsetree, query_id) \\\n VALUES (\" + \", \".join([self.wildcard]*2) +\")\",\n (parsetree.dumps(), parsetree.query_id))\n self.commit()",
"def insert_core(self, x, y, p, description, the_value):\n with self.transaction() as cur:\n core_id = self._get_core_id(cur, x, y, p)\n cur.execute(\n \"\"\"\n INSERT INTO core_provenance(\n core_id, description, the_value)\n VALUES(?, ?, ?)\n \"\"\", [core_id, description, the_value])",
"def insert(self, data):\n if '_rev' in data:\n self.__not_opened()\n raise PreconditionsException(\n \"Can't add record with forbidden fields\")\n _rev = self.create_new_rev()\n if not '_id' in data:\n try:\n _id = self.id_ind.create_key()\n except:\n self.__not_opened()\n raise DatabaseException(\"No id?\")\n else:\n _id = data['_id']\n assert _id is not None\n data['_rev'] = _rev # for make_key_value compat with update / delete\n data['_id'] = _id\n self._insert_indexes(_rev, data)\n ret = {'_id': _id, '_rev': _rev}\n data.update(ret)\n return ret",
"def insert(connection, row_data):\n cur = connection.cursor()\n cur.execute(\"INSERT INTO pomodoros VALUES (?,?,?,?)\", row_data)",
"def insert_row(self, data):\n print(\"Inserting row to database\")\n self.cursor.executemany(self.insert_query, data)\n self.connection.commit()",
"def _insert_data(_setup_database):\n\n _app = Saraki(__name__, db=None)\n _app.config[\"SQLALCHEMY_DATABASE_URI\"] = os.environ[\"TEST_DATABASE_URI\"]\n database.init_app(_app)\n\n with _app.app_context():\n insert_actions()\n\n with _app.app_context():\n insert_resources()\n\n with _app.app_context():\n insert_persons()\n insert_products()\n insert_orders()\n insert_cartoons()\n insert_plans()\n insert_users()\n\n # Insert all registered resources and actions\n _app.init()\n\n database.session.commit()",
"def add_provenance(self, source_field, term, notification_field, matched, explanation):\n uc = dataobj.to_unicode()\n obj = {\n \"source_field\" : self._coerce(source_field, uc),\n \"term\" : self._coerce(term, uc),\n \"notification_field\" : self._coerce(notification_field, uc),\n \"matched\" : self._coerce(matched, uc),\n \"explanation\" : self._coerce(explanation, uc)\n }\n self._add_to_list(\"provenance\", obj)",
"def insert_data(self, data:dict,):\n \n assert(isinstance(data, dict))\n field, value = \"\", \"\"\n for key in data.keys():\n field += key + ', '\n value += ':' + key + ', '\n \n field = field[:-2]\n value = value[:-2]\n sql_query = \"\"\"INSERT INTO %s (%s) VALUES(%s)\"\"\"%(self.db, field, value)\n self.conn.execute(sql_query, data)\n self.conn.commit()",
"def __insert(self):\n try:\n conn = connect()\n cur = conn.cursor()\n sql = \"\"\"\n insert into room (\n room_id, host_id, room_type, country, city,\n neighborhood, address, reviews, overall_satisfaction,\n accommodates, bedrooms, bathrooms, price, deleted,\n minstay, latitude, longitude, survey_id\n )\n \"\"\"\n sql += \"\"\"\n values (%s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s, %s, %s\n )\"\"\"\n insert_args = (\n self.room_id, self.host_id, self.room_type, self.country,\n self.city, self.neighborhood, self.address, self.reviews,\n self.overall_satisfaction, self.accommodates, self.bedrooms,\n self.bathrooms, self.price, self.deleted, self.minstay,\n self.latitude, self.longitude, self.survey_id,\n )\n cur.execute(sql, insert_args)\n cur.close()\n conn.commit()\n logger.debug(\"Room \" + str(self.room_id) + \": inserted\")\n except psycopg2.IntegrityError:\n # logger.info(\"Room \" + str(self.room_id) + \": insert failed\")\n conn.rollback()\n cur.close()\n raise\n except:\n conn.rollback()\n raise",
"def set_provenance(self, network_id, provenance):\n self._require_auth()\n route = \"/network/%s/provenance\" % network_id\n if isinstance(provenance, dict):\n put_json = json.dumps(provenance)\n else:\n put_json = provenance\n return self.put(route, put_json)",
"def insert_data(self, request=None, response=None):\n \n try:\n # pnr id placeholder\n pnr_id = None\n \n # create connection\n con = connect_db()\n # cursor\n cur = con.cursor(\n cursor_factory=DictCursor\n )\n\n # -------*******------- Insert Data in 'pnr' table -------*******-------\n try:\n\n # ------- Select Payment from Database -------\n\n payment_id = None\n\n if not request[\"DataSource\"] in [\"B2B\", \"B2B_AGENT\", \"B2B_ADMIN\"] and request[\"TransactionID\"] is not None and not request[\"TransactionID\"] == \"\":\n\n try:\n # Get payment ID\n cur.execute(\n \"SELECT id FROM payment WHERE tran_id='{}'\".format(\n request.get(\"TransactionID\", None)\n )\n )\n\n payment = cur.fetchone()\n\n # assign payment id\n payment_id = payment[\"id\"]\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to SELECT payment from 'payment' Table!\"\n )\n )\n\n # prepare data\n sql_pnr_data_map = self.prepare_pnr_data_map(request=request, response=response)\n\n # get production status\n skytrip_conf = SkyTripConfig()\n is_production = skytrip_conf.get_is_production()\n # get sabre token\n sabre_token = json.dumps(request.get('ExistedToken', {}))\n # get utils\n utils = json.dumps(request.get('Utils', {}))\n\n # Field Count => 25\n cur.execute(\n \"INSERT INTO pnr(user_id, payment_id, pnr_no, is_production, is_ticketed, data_source, route_1_origin_location_code, route_1_destination_location_code, route_1_departure_date, route_2_origin_location_code, route_2_destination_location_code, route_2_departure_date, route_3_origin_location_code, route_3_destination_location_code, route_3_departure_date, route_4_origin_location_code, route_4_destination_location_code, route_4_departure_date, carrier_code, flight_number, cabin_class, total_amount, currency, utils, sabre_token) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id;\",\n (\n request.get(\"UserID\", None),\n payment_id,\n sql_pnr_data_map.get(\"pnr_no\", None),\n is_production,\n False,\n str(request.get(\"DataSource\", None)),\n sql_pnr_data_map.get(\"route_1_origin_location_code\", None),\n sql_pnr_data_map.get(\"route_1_destination_location_code\", None),\n sql_pnr_data_map.get(\"route_1_departure_date\", None),\n sql_pnr_data_map.get(\"route_2_origin_location_code\", None),\n sql_pnr_data_map.get(\"route_2_destination_location_code\", None),\n sql_pnr_data_map.get(\"route_2_departure_date\", None),\n sql_pnr_data_map.get(\"route_3_origin_location_code\", None),\n sql_pnr_data_map.get(\"route_3_destination_location_code\", None),\n sql_pnr_data_map.get(\"route_3_departure_date\", None),\n sql_pnr_data_map.get(\"route_4_origin_location_code\", None),\n sql_pnr_data_map.get(\"route_4_destination_location_code\", None),\n sql_pnr_data_map.get(\"route_4_departure_date\", None),\n sql_pnr_data_map.get(\"carrier_code\", None),\n sql_pnr_data_map.get(\"flight_number\", None),\n sql_pnr_data_map.get(\"cabin_class\", None),\n sql_pnr_data_map.get(\"total_amount\", None),\n sql_pnr_data_map.get(\"currency\", None),\n utils,\n sabre_token,\n )\n )\n\n # Commit the pnr transaction\n con.commit()\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to insert Data in 'pnr' Table!\"\n )\n )\n\n # -------*******------- Insert Data in 'pnr_details' table -------*******-------\n \n 
try:\n\n # get inserted PNR row\n inserted_pnr_row = cur.fetchone()\n # get PNR ID\n pnr_id = inserted_pnr_row['id']\n # get data map\n sql_pnr_details_data_map = self.prepare_pnr_details_data_map_list(request=request)\n\n # insert data in pnr details table for every individual passenger\n for SQLpnrDetails in sql_pnr_details_data_map:\n # Field Count => 19\n cur.execute(\n \"INSERT INTO pnr_details(pnr_id, passenger_name_number, passenger_given_name, passenger_surname, passenger_email, passenger_contact, passenger_dob, passenger_gender, passenger_type, passport_number, passport_issuing_country, passport_nationality_country, passport_expiration_date, visa_number, visa_applicable_country, visa_place_of_birth, visa_place_of_issue, visa_issue_date, visa_expiration_date) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\",\n (\n pnr_id,\n SQLpnrDetails.get(\"passenger_name_number\", None),\n SQLpnrDetails.get(\"passenger_given_name\", None),\n SQLpnrDetails.get(\"passenger_surname\", None),\n SQLpnrDetails.get(\"passenger_email\", None),\n SQLpnrDetails.get(\"passenger_contact\", None),\n SQLpnrDetails.get(\"passenger_dob\", None),\n SQLpnrDetails.get(\"passenger_gender\", None),\n SQLpnrDetails.get(\"passenger_type\", None),\n SQLpnrDetails.get(\"passport_number\", None),\n SQLpnrDetails.get(\"passport_issuing_country\", None),\n SQLpnrDetails.get(\"passport_nationality_country\", None),\n SQLpnrDetails.get(\"passport_expiration_date\", None),\n SQLpnrDetails.get(\"visa_number\", None),\n SQLpnrDetails.get(\"visa_applicable_country\", None),\n SQLpnrDetails.get(\"visa_place_of_birth\", None),\n SQLpnrDetails.get(\"visa_place_of_issue\", None),\n SQLpnrDetails.get(\"visa_issue_date\", None),\n SQLpnrDetails.get(\"visa_expiration_date\", None)\n )\n )\n\n # Commit each pnr_details transaction\n # con.commit()\n\n # Commit the whole pnr_details transaction\n con.commit()\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to insert Data in 'pnr_details' Table!\"\n )\n )\n\n # -------*******------- Insert Data in 'pnr_carrier_code_stats' table -------*******-------\n\n try:\n # Get Number of PNRs for the carrier\n cur.execute(\n \"SELECT * FROM pnr_carrier_code_stats WHERE carrier_code='{}'\".format(\n sql_pnr_data_map.get(\"carrier_code\", None)\n )\n )\n\n carrier_data = cur.fetchone()\n\n if carrier_data is not None:\n # assign number_of_pnrs\n number_of_pnrs = carrier_data[\"number_of_pnrs\"]\n # assign pnr_list\n carrier_pnr_list = carrier_data[\"pnr_list\"]\n carrier_pnr_list.append(sql_pnr_data_map.get(\"pnr_no\", None))\n\n if number_of_pnrs >= 0:\n try:\n number_of_pnrs = number_of_pnrs + 1\n # Update Existing carrier information\n cur.execute(\n \"UPDATE pnr_carrier_code_stats SET number_of_pnrs='{}', pnr_list='{}' WHERE carrier_code='{}'\".format(\n number_of_pnrs,\n json.dumps(carrier_pnr_list),\n sql_pnr_data_map.get(\"carrier_code\", None)\n )\n )\n # Commit the whole pnr_carrier_code_stats transaction\n con.commit()\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to update Data in 'pnr_carrier_code_stats' Table!\"\n )\n )\n else:\n try:\n number_of_pnrs = 1\n carrier_pnr_list = json.dumps([sql_pnr_data_map.get(\"pnr_no\", None)])\n # insert data in pnr details table for every individual passenger\n # Field Count => 3\n 
cur.execute(\n \"INSERT INTO pnr_carrier_code_stats(carrier_code, number_of_pnrs, pnr_list) values(%s,%s,%s);\",\n (\n sql_pnr_data_map.get(\"carrier_code\", None),\n number_of_pnrs,\n carrier_pnr_list\n )\n )\n\n # Commit the whole pnr_carrier_code_stats transaction\n con.commit()\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to insert Data in 'pnr_carrier_code_stats' Table!\"\n )\n )\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed Database trtansaction in 'pnr_carrier_code_stats' table!\"\n )\n )\n \n # Commit the whole transaction\n # con.commit()\n\n # close connection\n con.close()\n\n # return pnr id\n return pnr_id\n \n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to insert Data in Database!\"\n )\n )",
"def _insert(self, router, distanceVector):\r\n if router not in self.routingTable:\r\n self.routingTable[router] = {}\r\n\r\n dv = self.routingTable[router]\r\n\r\n for destinationRouter, distance, nextHopRouter in distanceVector:\r\n if destinationRouter not in dv:\r\n dv[destinationRouter] = {}\r\n\r\n dv[destinationRouter]['distance'] = distance\r\n dv[destinationRouter]['nextHopRouter'] = nextHopRouter",
"def insert_product(self, data):\n query = \"INSERT INTO Products VALUES (NULL, %s, %s, %s, %s, %s)\"\n self.mycursor.execute(query, data)\n self.connector.commit()",
"def insert(cls, env, record):\n with env.db_transaction as db:\n\n cursor = db.cursor()\n sqlString = \"\"\"INSERT INTO ticket_template_store\n (tt_time,tt_user,tt_name,tt_field,tt_value)\n VALUES (%s,%s,%s,%s,%s)\"\"\"\n cursor.execute(sqlString, record)",
"def load_preprocessed(study_id, params_table, filedir, filepathtype,\n params_id, prep_template_id, data_type):\n preproc_data = load_preprocessed_data_from_cmd(\n study_id, params_table, filedir, filepathtype, params_id,\n prep_template_id, data_type)\n click.echo(\"Preprocessed data successfully added to the database with \"\n \"id %s\" % preproc_data.id)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Inserts data for a specific core into the `core_provenance` table.
|
def insert_core(self, x, y, p, description, the_value):
with self.transaction() as cur:
core_id = self._get_core_id(cur, x, y, p)
cur.execute(
"""
INSERT INTO core_provenance(
core_id, description, the_value)
VALUES(?, ?, ?)
""", [core_id, description, the_value])
|
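A hypothetical sketch of what the `_get_core_id` helper used above might look like; it is not shown in the snippet, so the table name, columns and INSERT OR IGNORE/SELECT pattern below are guesses for illustration only.

# Hypothetical only: one common shape for an (x, y, p) -> core_id lookup.
import sqlite3


def _get_core_id(cur, x, y, p):
    # Ensure a row exists for this core, then return its id.
    cur.execute(
        "INSERT OR IGNORE INTO core(x, y, p) VALUES(?, ?, ?)", (x, y, p))
    cur.execute(
        "SELECT core_id FROM core WHERE x = ? AND y = ? AND p = ?", (x, y, p))
    return cur.fetchone()[0]


# Minimal self-contained check with invented coordinates:
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE core(core_id INTEGER PRIMARY KEY, "
    "x INTEGER, y INTEGER, p INTEGER, UNIQUE(x, y, p))")
print(_get_core_id(conn.cursor(), 0, 0, 3))  # -> 1 on an empty table
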
[
"def create_core(self, **kwargs):\n return CoreProfile(entity=self, **kwargs).save()",
"def core_product(self, core_product):\n\n self._core_product = core_product",
"def add_ip_to_core_map(self, interface, core):\n\n self.logger.info(\"Add interface %s and core [%s] into local map\",\n interface, core)\n if interface not in self.interface_core_map:\n return\n\n if self.is_ip_in_core_map(interface, core[0]):\n return\n else:\n core_list = self.interface_core_map[interface]\n core_list.append(core)",
"def create_original_core(self, interface):\n\n para_set = []\n for agent_id in range(ProcessAgent.AGENTTYPE_INTERFACE_STATUS, ProcessAgent.AGENTTYPE_L2TP + 1):\n para = provision_pb2.msg_agent_parameter()\n para.agent_id = agent_id\n para.parameter = interface\n para_set.append(para)\n\n core, reason = CCAPCore.add_ccap_core(\n self, para_set,\n initiated_by=self.CORE_INITIAL_TRIGGER[self.STARTUP_CORE_TRIGGER],\n interface=interface, test_flag=self.test_flag)\n\n if not core:\n self.logger.error(\n \"Cannot create core on interface %s, reason:%s \", interface, reason)\n return\n\n # Record the core\n if interface in self.interface_core_map:\n self.logger.warn(\n \"Interface %s core configuration should be NULL in this state.\", interface)\n else:\n self.interface_core_map[interface] = list()",
"def core_load(self, core, config_set='_default', verbose=False):\n\n existing_cores = self.cores(verbose)\n if core in existing_cores:\n print('Solr create: core with \"%s\" name already exist!' % core)\n return\n\n # cfg = 'configsets/_default/conf/solrconfig.xml'\n # other supported parameters with default values:\n # instanceDir: whatever is specified for \"name\" parameter is set\n # by default\n # config: name of the config file (i.e., solrconfig.xml)\n # relative to instanceDir.\n params = {\n 'action': 'CREATE',\n 'wt': 'json',\n 'name': core,\n 'config_set': config_set,\n 'instanceDir': 'mycores/%s' % core\n }\n\n if verbose:\n print('Solr core_load:')\n\n self._get('admin/cores', params, verbose)",
"def insert_power(self, description, the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO power_provenance(\n description, the_value)\n VALUES(?, ?)\n \"\"\", [description, the_value])",
"def test_core_files(self):\n # create a core.gdb file\n self.log.debug(\"Create a core.gdb.harness.advanced file in core_pattern dir.\")\n try:\n results = run_local(self.log, \"cat /proc/sys/kernel/core_pattern\", check=True)\n except RunException:\n self.fail(\"Unable to find local core file pattern\")\n core_path = os.path.split(results.stdout.splitlines()[-1])[0]\n core_file = \"{}/core.gdb.harness.advanced\".format(core_path)\n\n self.log.debug(\"Creating %s\", core_file)\n try:\n with open(core_file, \"w\", encoding=\"utf-8\") as local_core_file:\n local_core_file.write(\"THIS IS JUST A TEST\\n\")\n except IOError as error:\n self.fail(\"Error writing {}: {}\".format(local_core_file, str(error)))\n\n # Choose a server find the pid of its daos_engine process\n host = NodeSet(choice(self.server_managers[0].hosts)) # nosec\n ranks = self.server_managers[0].get_host_ranks(host)\n self.log.info(\"Obtaining pid of the daos_engine process on %s (rank %s)\", host, ranks)\n pid = None\n result = run_remote(self.log, host, \"pgrep --list-full daos_engine\", timeout=20)\n if not result.passed:\n self.fail(\"Error obtaining pid of the daos_engine process on {}\".format(host))\n pid = findall(r\"(\\d+)\\s+[A-Za-z0-9/]+daos_engine\\s+\", \"\\n\".join(result.output[0].stdout))[0]\n if pid is None:\n self.fail(\"Error obtaining pid of the daos_engine process on {}\".format(host))\n self.log.info(\"Found pid %s\", pid)\n\n # Send a signal 6 to its daos_engine process\n self.log.info(\"Sending a signal 6 to %s\", pid)\n if not run_remote(self.log, host, \"sudo -n kill -6 {}\".format(pid)).passed:\n self.fail(\"Error sending a signal 6 to {} on {}\".format(pid, host))\n\n # Simplify resolving the host name to rank by marking all ranks as\n # expected to be either running or errored (sent a signal 6)\n self.server_managers[0].update_expected_states(ranks, [\"Joined\", \"Errored\"])\n\n # Wait for the engine to create the core file\n ranks = self.server_managers[0].get_host_ranks(host)\n state = [\"errored\"]\n try:\n self.log.info(\n \"Waiting for the engine on %s (rank %s) to move to the %s state\",\n host, ranks, state)\n if self.server_managers[0].check_rank_state(ranks, state, 25):\n self.fail(\"Rank {} state not {} after sending signal 6\".format(ranks, state))\n finally:\n # Display the journalctl log for the process that was sent the signal\n self.server_managers[0].manager.dump_logs(host)\n\n self.log.info(\"Test passed\")",
"def setup_solr_core(solr_core, schema_xml=None, solrconfig_xml=None):\n source_dir = 'test-solr/solr/collection1'\n target_dir = 'test-solr/solr/{}'.format(solr_core)\n # Remove old core dir if it exists\n if os.path.isdir(target_dir):\n shutil.rmtree(target_dir)\n # Make a copy of the collection1 core\n shutil.copytree(\n source_dir,\n target_dir\n )\n\n # Remove core.properties from new core (requirement to add a new core)\n if os.path.isfile('{}/core.properties'.format(target_dir)):\n os.remove('{}/core.properties'.format(target_dir))\n # Load solrconfig.xml if file exists\n if not solrconfig_xml:\n solrconfig_xml = '{}-solrconfig.xml'.format(solr_core)\n if os.path.isfile('templates/{}'.format(solrconfig_xml)):\n prepare_solrconfig(solrconfig_xml, solr_core)\n # Load schema.xml if file exists\n if not schema_xml:\n schema_xml = '{}-schema.xml'.format(solr_core)\n if os.path.isfile('templates/{}'.format(schema_xml)):\n prepare_schema(schema_xml, solr_core)\n # Prepare stopwords\n prepare_stopwords_txt(solr_core)\n # Prepare synonyms\n prepare_synonyms_txt(solr_core)\n\n # Register the new core in Solr\n #core_admin = pysolr.SolrCoreAdmin('http://localhost:8989/solr/admin/cores')\n #core_admin.create('phrase_match')\n response = requests.get(\n 'http://localhost:8989/solr/' +\n 'admin/cores?action=CREATE' +\n '&name={}'.format(solr_core) +\n '&instanceDir={}'.format(solr_core)\n )\n\n if response.status_code != 200:\n raise\n solr = pysolr.Solr(SOLR_BASE_URL + '/' + solr_core, timeout=SOLR_TIMEOUT)\n return solr",
"def insert_gatherer(self, x, y, address, bytes_read, run, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO gatherer_provenance(\n x, y, address, bytes, run, description, the_value)\n VALUES(?, ?, ?, ?, ?, ?, ?)\n \"\"\", [x, y, address, bytes_read, run, description, the_value])",
"def _get_processes_for_core(graph: Graph, core: Tuple[int, int]) -> Optional[Iterable[Node]]:\n\n\treturn [node for node in graph if node.cpu_id == core[0] and node.core_id == core[1]]",
"def commit(self, core, verbose=False):\n\n post_header = {\n 'Content-Type': 'application/json',\n 'charset': 'utf-8'\n }\n\n binary_data = {\n 'commit': {}\n }\n\n if verbose:\n print('Solr commit:')\n\n self._post_core(core, 'update', post_header, binary_data, verbose)",
"def addCPU(self, core, speed, desc):\n self.cpus[core] = {\n \"Speed\" : speed,\n \"Description\" : desc,\n }\n return",
"def insert_monitor(self, x, y, description, the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO monitor_provenance(\n x, y, description, the_value)\n VALUES(?, ?, ?, ?)\n \"\"\", [x, y, description, the_value])",
"def get_core_ext(self):\n\n self.core = self.oracle.get_core()\n\n if self.core:\n # try to reduce the core by trimming\n self.trim_core()\n\n # filtering out extra hard clauses from the core\n iter1, iter2 = itertools.tee(self.core)\n self.filt = list(l for l in iter1 if l in self.ehset)\n self.core = list(l for l in iter2 if l not in self.ehset)\n\n # updating the union of all extra hard clauses involved in cores\n self.reason = self.reason.union(set(self.filt))\n\n # and by heuristic minimization\n self.minimize_core_ext()\n\n # the core may be empty after core minimization\n if not self.core:\n return\n\n # core weight\n self.minw = min(map(lambda l: self.wght[l], self.core))\n\n # dividing the core into two parts\n iter1, iter2 = itertools.tee(self.core)\n self.core_sels = list(l for l in iter1 if l in self.sels_set)\n self.core_sums = list(l for l in iter2 if l not in self.sels_set)",
"def start_debug_core(self, core: \"CoreTarget\") -> DelegateResult:\n pass",
"def insert_router(\n self, x, y, description, the_value, expected=True):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO router_provenance(\n x, y, description, the_value, expected)\n VALUES(?, ?, ?, ?, ?)\n \"\"\", [x, y, description, the_value, expected])",
"def will_start_debug_core(self, core: \"CoreTarget\") -> None:\n pass",
"def insert_connector(\n self, pre_population, post_population, the_type, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT OR IGNORE INTO connector_provenance(\n pre_population, post_population, the_type, description,\n the_value)\n VALUES(?, ?, ?, ?, ?)\n \"\"\",\n [pre_population, post_population, the_type, description,\n the_value])",
"def detect_core(self, core_path=None, system_name=None, re1_hostname=None, command=None):\n\n exclude_pattern = self._kwargs.get('core-exclude', None)\n core_cmd = command or 'show system core-dumps'\n def get_cores(fallback_method, core_path):\n if not fallback_method:\n core_raw_output = self.cli(command=core_cmd, format='text').response()\n if core_path:\n exec_core_path = 'ls -ltd ' + core_path\n additional_core = self.shell(command=exec_core_path).response()\n core_raw_output += \"\\n\" + additional_core\n return core_raw_output\n\n try:\n core_count = 0\n fallback_method = False # set to True if 'show system core-dumps' not available\n if core_path is None:\n core_path = self.core_path\n\n core_path = ' '.join(core_path)\n\n dev_start_time = self._device_start_time\n self.log(level='info', message='Device start time : ' + time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(dev_start_time)))\n\n year = self.shell(command='date +%Y').response()\n year = re.sub(r\"date \\+\\%Y.*\\n\", '', year)\n year = re.sub(r\"[^\\w| ]\", '', year)\n if not hasattr(t, \"vmhost_name_timestamp_list\"):\n setattr(t, \"vmhost_name_timestamp_list\", [])\n t.vmhostcore_name_timestamp_list = []\n # If device has vmhost we should be copying any cores from there to Junos\n if self.get_vmhost_infra():\n vmhost_cores = self.cli(command='show vmhost crash', format='text').response()\n vmhost_core_files = re.findall(r'(\\d*)\\s([a-zA-Z]{3}\\s[\\s\\d]{2}\\s+\\d+\\:\\d+)\\s+(\\S*)', str(vmhost_cores))\n if vmhost_core_files:\n for c in vmhost_core_files:\n # Need to first check to make sure the core is new before copying\n date = c[1]\n date += ' ' + year\n epoch_time = float(time.mktime(time.strptime(date, \"%b %d %H:%M %Y\")))\n self.log(level='info', message='Core file timestamp '\n 'on vmhost : ' + date+' :: epoch time : '+ str(epoch_time))\n # if epoch_time is not None and epoch_time >= dev_start_time:\n coresize_name_timestamp = \"|\".join(c)\n if coresize_name_timestamp not in t.vmhostcore_name_timestamp_list:\n cmd = \"request vmhost file-copy crash from-jnode \" + c[2] + \" to-vjunos /var/tmp/\" + c[2]\n self.cli(command=cmd, format='txt')\n t.vmhostcore_name_timestamp_list.append(coresize_name_timestamp)\n core_del = self.cli(command='request vmhost cleanup')\n else:\n self.log(level=\"INFO\", message=\"No core(s) copied from vmhost\")\n\n core_raw_output = self.cli(command=core_cmd, format='text').response()\n\n # If 'show system core-dumps' isn't available then default on searching\n # various core paths\n if re.search(r'syntax error', str(core_raw_output), re.I):\n exec_core_path = 'ls -ltd ' + core_path\n core_raw_output = self.shell(command=exec_core_path).response()\n fallback_method = True # this way we know to not use CLI command later\n else:\n # Make sure to search any paths that are not covered by\n # 'show system core-dumps' but are included in the core_path provided\n core_path_list = core_path.split(' ')\n search = re.findall(r'(/.*)(\\*core\\*)|(/.*/)(.*core.*)', str(core_raw_output))\n set_of_core_paths = set()\n # add all the paths from 'show system core-dumps' to a set\n for path in search:\n if path[0]:\n set_of_core_paths.add(path[0]+\"*core*\")\n if path[2]:\n set_of_core_paths.add(path[2]+\"*core*\")\n # if 'show system core-dumps' has any path from 'core_path' remove it\n for path in set_of_core_paths:\n if path in core_path_list:\n core_path_list.remove(path)\n core_path = ' '.join(core_path_list)\n # if any paths left add them to the search results\n if core_path:\n 
exec_core_path = 'ls -ltd ' + core_path\n additional_core = self.shell(command=exec_core_path).response()\n core_raw_output += \"\\n\" + additional_core\n\n # Checks file permission once. If any still contain only 'user' permissions then we cannot be sure\n # the cores have been dumped so we must monitor the file size (times out after 3 minutes)\n if re.search(r'-rw-------', str(core_raw_output), re.M):\n initial_time = time.time() # take initial timestamp\n core_raw_output = get_cores(fallback_method, core_path)\n\n cores_still_dumping = True\n timeout = 900\n # Stores all the core files in a dictionary with key based on core path\n # with value as the size of the core\n core_files = re.findall(r'(\\d*)\\s[a-zA-Z]{3}\\s[\\s\\d]{2}\\s+[:\\d]+\\s+(\\S*)', str(core_raw_output))\n # If there are no core files we will skip checking for size\n if not core_files:\n cores_still_dumping = False\n else:\n cores_dict = {}\n for core in core_files:\n cores_dict[core[1]] = core[0]\n\n # Since now the core should have dumped since 'others' has permission on the file\n # we want to do a final check to ensure the core has dumped by\n while cores_still_dumping and (time.time() <= (initial_time + timeout)):\n self.log(level=\"INFO\", message=\"Core(s) are still currently being dumped. Please wait for 10 seconds \"\n \"to see if all cores have dumped by then.\\nDumping of core(s) will be \"\n \"checked for a maximum of 15 minutes.\")\n core_raw_output = get_cores(fallback_method, core_path)\n # This regex detects the size and path of each core and stores them in a dictionary\n core_files = re.findall(r'(\\d*)\\s[a-zA-Z]{3}\\s[\\s\\d]{2}\\s+[:\\d]+\\s+(\\S*)', str(core_raw_output))\n cores_dict_new = {}\n for core in core_files:\n cores_dict_new[core[1]] = core[0]\n\n # This checks to see if the size of each core has changed at all\n for core in cores_dict:\n # If the file name has changed we need to take another snapshot\n if core not in cores_dict_new:\n cores_still_dumping = True # this may have been set to False last loop iteration\n time.sleep(10)\n break\n # If the size is not the same of any core we wait 5 seconds and try again\n elif cores_dict[core] != cores_dict_new[core]:\n cores_still_dumping = True # this may have been set to False last loop iteration\n time.sleep(10)\n break\n # This means the file size has not changed\n else:\n cores_still_dumping = False\n\n # set the current cores to the old dict for comparison in next loop\n cores_dict = cores_dict_new\n\n # If all the file sizes have not changed means all the cores have dumped however during the last\n # comparison another core may have started dumping. For this reason we want to check one last time\n # if there are any new cores. 
If so we will continue checking until timeout is reached\n if not cores_still_dumping:\n core_raw_output = get_cores(fallback_method, core_path)\n core_files = re.findall(r'(\\d*)\\s[a-zA-Z]{3}\\s[\\s\\d]{2}\\s+[:\\d]+\\s+(\\S*)', str(core_raw_output))\n # Need to store in dictionary before measuring lengths to avoid duplicate core entries\n cores_dict_temp = {}\n for core in core_files:\n cores_dict_temp[core[1]] = core[0]\n # If there is a new core then need to store the newest snapshot as the old, then\n # run the loop again and take another new snapshot to compare to the old\n if len(cores_dict_temp) > len(cores_dict):\n cores_dict = cores_dict_temp # store the newly found core(s) as older snapshot\n cores_still_dumping = True # reset\n\n # If cores do not dump within 15 minutes, issue a warning\n if (time.time() > (initial_time + timeout)) and cores_still_dumping:\n self.log(level='WARN', message=\"Core(s) have not finished dumping even after waiting for 15 minutes\")\n\n # if (self.get_vmhost_infra()): # This will be True if the device belongs to Mt_Rainier(vmhost infra)\n # host_core_path = '/var/crash/*core*'\n # host_core_path = 'vhclient ls -ltd \\\"' + host_core_path + '\\\"'\n # self.su()\n # host_core = self.shell(command=host_core_path).response()\n # core_raw_output = core_raw_output + '\\n' + host_core\n\n self.log(level='DEBUG', message='Stage : %s ' %t._stage)\n self.log(level='DEBUG', message='Test Stage : %s ' %t._test_stage)\n\n if t._test_stage:\n t._stage = t._test_stage\n self.log(level='DEBUG', message='Stage : %s ' %t._stage)\n hstname = self.controllers_data['hostname']\n\n for line in re.split('\\n', str(core_raw_output)):\n result = re.search(r'^(\\S+)\\s+\\d+\\s+\\S+\\s+\\S+\\s+(\\d+)\\s+(\\S+\\s+\\d+'\n r'\\s+\\d+\\:\\d+|\\d+)\\s+(\\S+)', line, re.I)\n if result:\n permission, size, date, core_name = result.groups()\n\n core_rename = 're0'\n if hasattr(self, 're_name'):\n if not self.re_name == str(None): core_rename = self.re_name\n\n if not int(size):\n if exclude_pattern and not re.search(exclude_pattern, core_name):\n self.log(level='WARN', message='Core has zero size : ' +\n core_name)\n elif core_name is None:\n continue\n\n elif re.search(r'trap_fpc\\S+\\.core\\.\\d+', core_name, re.I):\n continue\n elif re.search(r'ttrace_fpc\\S+\\.core\\.\\d+', core_name, re.I):\n continue\n elif re.search(r'\\/gcov-\\S+\\.core\\.\\d+', core_name, re.I):\n continue\n elif re.search(r'(\\/cores\\/)', core_name):\n continue\n\n if not re.search(r'\\.(?:\\d+|[t]*gz)$', core_name, re.I):\n self.log(level='DEBUG',\n message='Seems some core pattern found but not compressed to tgz or gz '\n 'format : ' + core_name)\n self.log(level='DEBUG', message=\"Checking whether '%s' is a file type or not..\" % core_name)\n if re.search(r'^-', permission):\n self.log(level='WARN', message='Found uncompressed core : ' + core_name)\n else:\n self.log(level='INFO',\n message='Skipping %s since it is neither in file format nor in '\n 'tgz format ' % core_name)\n continue\n\n res = re.search(r'(.*\\/)(.*)', core_name)\n if res:\n core_found_path, core_filename = res.group(1), res.group(2)\n else:\n continue\n date += ' ' + year\n epoch_time = float(time.mktime(time.strptime(date, \"%b %d %H:%M %Y\")))\n core_timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch_time))\n self.log(level='info', message='Core file timestamp '\n 'on router :' + core_timestamp+ \" :: epoch time :\"+str(epoch_time))\n else:\n continue\n\n # if epoch_time is not None and epoch_time >= 
dev_start_time:\n stage = t._stage\n if system_name: resource_id = system_name\n\n if not resource_id in t.cores:\n t.cores[resource_id] = {}\n\n if resource_id in t.cores:\n if not hstname in t.cores[resource_id]:\n t.cores[resource_id][hstname] = {}\n if not 'core_full_path_list' in t.cores[resource_id][hstname]:\n t.cores[resource_id][hstname]['core_full_path_list'] = []\n\n\n core_exist = core_filename + core_rename if (self.evo) \\\n else core_filename + '|' + date + '|' + core_rename\n if not exclude_pattern or (exclude_pattern and not re.search(exclude_pattern, core_filename)):\n self.log(level='INFO',\n message='Core is found, checking whether the core (%s) is logged or not ' % core_exist)\n self.log(level='DEBUG', message='t.core_list : %s' % t.core_list)\n\n if resource_id in t.core_list and core_exist in t.core_list[resource_id]:\n self.log(level='INFO', message='Core is already logged (%s) ' % core_exist)\n continue\n else:\n self.log(level='DEBUG',\n message='Core is not logged (%s). Adding to the list of the cores found ' % core_exist)\n\n self.log(level='INFO', message='t.core_list : %s' % t.core_list)\n core_count += 1\n core_id = 'core' + str(core_count) + core_rename\n core_full_path = None\n if stage and system_name:\n t.core[stage][resource_id][core_id] = defaultdict(dict)\n t.core[stage][resource_id][core_id]['core_name'] = core_filename\n t.core[stage][resource_id][core_id]['core_src_path'] = core_found_path\n t.core[stage][resource_id][core_id]['core_timestamp'] = core_timestamp\n t.core[stage][resource_id][core_id]['core_size'] = size\n t.core[stage][resource_id][core_id]['core_re'] = core_rename\n if re1_hostname is not None: t.core[stage][resource_id][core_id]['host1'] = re1_hostname\n\n core_full_path = core_found_path + core_filename\n t.core_list[resource_id].append(core_exist)\n\n self.log(level=\"DEBUG\", message=\"Creating core path for the resource \"\n \"in t.cores : \" + resource_id + \" and list is \" + core_full_path)\n t.cores[resource_id][hstname]['core_full_path_list'].append(core_full_path)\n\n self.log(level='info', message='Core [' + core_name + ']: ' + size + ' bytes added to the core list.')\n self.log(level='info', message='Core found (' + core_name + ';' + core_timestamp + ')')\n else:\n self.log(level='info', message='Skipping core '+ core_filename + ' as exclude pattern is set '\n 'to :: '+ str(exclude_pattern))\n # else:\n # self.log(level='info', message='Skipping core '\n # '' + core_name + ' as the core is found before script started.')\n except Exception as err:\n raise TobyException(\"Error in detect_core \" + str(err), host_obj=self)\n return core_count"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Saves a message to the `reports` table and, if applicable, logs it. Only messages up to the cutoff set by the `provenance_report_cutoff` configuration are logged.
|
def insert_report(self, message):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO reports(message)
VALUES(?)
""", [message])
recorded = cur.lastrowid
cutoff = get_config_int("Reports", "provenance_report_cutoff")
if cutoff is None or recorded < cutoff:
logger.warning(message)
elif recorded == cutoff:
logger.warning(f"Additional interesting provenance items in "
f"{self._database_file}")
|
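A minimal, self-contained sketch of the cutoff behaviour described in the query above, using an in-memory SQLite database; the CUTOFF constant and the summary message are stand-ins for the real get_config_int lookup and report text, which are assumptions here.

# Illustrative sketch only: report messages are always stored, but logging
# stops once the configured cutoff row id has been reached.
import logging
import sqlite3

logger = logging.getLogger(__name__)
CUTOFF = 3  # stand-in for get_config_int("Reports", "provenance_report_cutoff")

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE reports(message TEXT)")

def insert_report(message):
    cur = conn.cursor()
    cur.execute("INSERT INTO reports(message) VALUES(?)", [message])
    recorded = cur.lastrowid
    conn.commit()
    if CUTOFF is None or recorded < CUTOFF:
        logger.warning(message)  # still below the cutoff: log as well as store
    elif recorded == CUTOFF:
        logger.warning("Further provenance items are stored but not logged")

for n in range(5):
    insert_report(f"provenance item {n}")  # later items are stored silently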
[
"def save_report():\n ct.save_report()",
"def persist_report():",
"async def report(self, ctx, *, report = None):\n if not report:\n raise CustomPermissionError\n try:\n await ctx.bot.log.send(embed = await Macro.Embed.infraction(\n f\"{ctx.author.name} from {ctx.guild} said this:\\n{report}\"))\n except Exception as error:\n await ctx.send(embed = await Macro.send(\"The report was not sent\"))\n raise error\n await ctx.send(embed = await Macro.send(\"The report has been sent\"))",
"def writeReport(self, content):\n self.finalReport.write( content )",
"def report(msg):\n if self.need_report:\n self.reporter.report(msg)",
"def insertReport(cfgInterface):\n if not hasService(cfgInterface):\n cfgInterface.cmsConfig.psdata['services']['MessageLogger'] = {\n '@classname': ('string', 'tracked', 'MessageLogger'),\n }\n loggerSvc = cfgInterface.cmsConfig.service(\"MessageLogger\")\n if not loggerSvc.has_key(\"fwkJobReports\"):\n loggerSvc['fwkJobReports'] = (\"vstring\", \"untracked\", [])\n\n loggerSvc['fwkJobReports'][2].append(\"\\\"FrameworkJobReport.xml\\\"\")\n return",
"def log_report(self, name: str, content: str) -> Optional[str]:\n file_name = os.path.join(LOCAL_REPORTS_DIR, name + \".log\")\n\n with open(file_name, \"w\") as wfh:\n wfh.write(content)",
"def save_report(self, report):\n \n # Assuming that json is used to transfer data between RF and TOOLS\n self.variable.update(report)",
"def save_log(self, message, raw_msg, time, level, user_prefix):\n errmsg = []\n for key in self._hooks:\n try:\n hook = self._hooks[key]\n if level in hook['levels']:\n hook['target'](\n message=message,\n raw_msg=raw_msg,\n time=time,\n level=level,\n user_prefix=user_prefix,\n **hook['kwargs']\n )\n except Exception as e:\n errmsg.append(e)\n\n if '--no-log' not in self.P.argv:\n with open(self.cfg['log_file'], 'ab') as f:\n f.write(''.join([message, '\\n']).encode('utf-8'))\n for message in errmsg:\n f.write(''.join([message, '\\n']).encode('utf-8'))",
"def save_log_data(\n log_data: Union[pd.DataFrame, \"LogData\"],\n path: path_t,\n subject_id: Optional[str] = None,\n overwrite: Optional[bool] = False,\n show_skipped: Optional[bool] = False,\n):\n from biopsykit.carwatch_logs import LogData # pylint: disable=import-outside-toplevel\n\n if isinstance(log_data, pd.DataFrame):\n if isinstance(log_data.index, pd.MultiIndex):\n # dataframe has a multiindex => it's a combined dataframe for all subjects\n log_data.to_csv(path, sep=\";\")\n return\n log_data = LogData(log_data)\n\n if subject_id is None:\n subject_id = log_data.subject_id\n\n export_path = path.joinpath(f\"logs_{subject_id}.csv\")\n if not export_path.exists() or overwrite:\n log_data.data.to_csv(export_path, sep=\";\")\n elif show_skipped:\n print(f\"Skipping subject {subject_id}. Already exported.\")",
"def record_results(self, script):\n if self.ret.get(\"status\", None) in [\"ok\", \"up\"]:\n results = self.calculation_results(script)\n\n score = self.message.get(\"score\") if results else 0\n\n else:\n results = False\n score = 0\n\n scene_id = self.message.get(\"scene_id\")\n cr_event_scene = cr_scene_models.CrEventScene.objects.filter(cr_scene_instance=scene_id).first()\n if cr_event_scene:\n cr_scene_models.CrSceneMissionCheckLog.objects.create(\n mission_id=self.message[\"id\"],\n score=float(self.message[\"score\"]),\n cr_event=cr_event_scene.cr_event if cr_event_scene.cr_event else None,\n is_solved=results,\n target_ip=self.message.get('target_ip'),\n script=self.message.get('script'),\n )\n else:\n cr_scene = cr_scene_models.CrScene.objects.filter(scene_id=scene_id).first()\n\n cr_scene_models.CmsTestCheckLog.objects.create(\n mission_id=self.message.get(\"id\"),\n target_ip=self.message.get('target_ip'),\n cr_scene=cr_scene if cr_scene else None,\n is_solved=results,\n score=score,\n script=script,\n )\n\n self.logger.debug(\"Mission[%s]: Write data to the log table\", self.title)",
"def save_log(self, path):\n self.log_df.to_csv(path, index=False)",
"def _save_logs_for_power_test(self, monsoon_result):\n current_time = get_current_human_time()\n file_name = \"%s_%s\" % (self.current_test_name, current_time)\n monsoon_result.save_to_text_file(\n [monsoon_result], os.path.join(self.monsoon_log_path, file_name))\n\n self.ad.take_bug_report(self.current_test_name, current_time)",
"def delivery_report(case_id, report_path, update):\n\n adapter = store\n\n try:\n load_delivery_report(\n adapter=adapter,\n case_id=case_id,\n report_path=report_path,\n update=update,\n )\n LOG.info(\"saved report to case!\")\n except Exception as err:\n LOG.error(err)\n raise click.Abort()",
"def add_report(self):\n\n session = db.get_session()\n report = {\n \"mark\": int(self.lab_mark.text()),\n \"mark_date\": to_datetime(self.de_mark_date.date()),\n \"report_type\": str(self.lab_report_type.text()),\n \"discipline\": session.query(Discipline).filter(\n Discipline.id == int(self.lab_discipline_id.text())),\n \"student\": session.query(Student).filter(\n Student.id == int(self.lab_student.text()))\n }\n\n if not all(report.values()):\n required_field_empty_warning(self)\n else:\n db.insert_objects(Report(**report))",
"def write_processlog(self, level, message, event=None, solution_num=None,\n process_id=None, scan_id=None, plate_id=None,\n archive_id=None):\n\n col_list = ['process_id', 'scan_id', 'plate_id', 'archive_id',\n 'level', 'event', 'solution_num', 'message']\n val_tuple = (process_id, scan_id, plate_id, archive_id,\n level, event, solution_num, message)\n col_str = ','.join(col_list)\n val_str = ','.join(['%s'] * len(col_list))\n sql = ('INSERT INTO {} ({}) VALUES ({})'\n .format(self.table_name('process_log'), col_str, val_str))\n self.db.execute_query(sql, val_tuple)",
"def saveLogResults(self):\n try:\n # print(csvReportFolder)\n logPicklePath = os.path.join(self.getCurrentCsvReportFolder(), 'LogResults.pkl')\n with open(logPicklePath, 'wb') as f:\n pickle.dump(self.logDict, f)\n except:\n print(traceback.format_exc())",
"def saveConsoleLog(self,path=None):\n if path==None: path = self.env.getReportLogDir() + '/consoleLog.txt'\n with open(path, 'a') as f: \n f.write(self.consoleLog)",
"def save(cls):\n\n # Retrieve the save directory from settings\n logdir = Settings.get_logdir()\n # Create the save directory if necessary\n if not logdir.exists():\n os.makedirs(logdir)\n\n # Retrieve the full save path from settings\n logpath = Settings.get_logpath()\n # Write the log out to the file.\n with logpath.open(\"w\", encoding=\"utf-8\") as file:\n for entry in cls._log.values():\n file.write(\n (\n f\"{entry.timestamp_as_string()}|\"\n f\"{entry.duration_as_string()}|\"\n f\"{entry.notes}\\n\"\n )\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Inserts edge data into the `connector_provenance` table.
|
def insert_connector(
self, pre_population, post_population, the_type, description,
the_value):
with self.transaction() as cur:
cur.execute(
"""
INSERT OR IGNORE INTO connector_provenance(
pre_population, post_population, the_type, description,
the_value)
VALUES(?, ?, ?, ?, ?)
""",
[pre_population, post_population, the_type, description,
the_value])
|
[
"def append_edge(self, edge):",
"def insert_gatherer(self, x, y, address, bytes_read, run, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO gatherer_provenance(\n x, y, address, bytes, run, description, the_value)\n VALUES(?, ?, ?, ?, ?, ?, ?)\n \"\"\", [x, y, address, bytes_read, run, description, the_value])",
"def test_graph_edge_data_added(dataset1_PropertyGraph):\n from cugraph.experimental import PropertyGraph\n\n pG = dataset1_PropertyGraph\n eicn = PropertyGraph.edge_id_col_name\n\n expected_num_edges = \\\n len(dataset1[\"transactions\"][-1]) + \\\n len(dataset1[\"relationships\"][-1]) + \\\n len(dataset1[\"referrals\"][-1])\n\n assert pG.num_edges == expected_num_edges\n\n # extract_subgraph() should return a directed Graph object with additional\n # meta-data, which includes edge IDs.\n G = pG.extract_subgraph(create_using=DiGraph_inst, allow_multi_edges=True)\n\n # G.edge_data should be set to a DataFrame with rows for each graph edge.\n assert len(G.edge_data) == expected_num_edges\n edge_ids = sorted(G.edge_data[eicn].values)\n\n assert edge_ids[0] == 0\n assert edge_ids[-1] == (expected_num_edges - 1)",
"def add_edge(self, edge):\n\t\tedge = set(edge)\n\t\t(vertex, neighbor) = tuple(edge)\n\t\tif vertex not in self.g:\n\t\t\tself.g[vertex] = [neighbor]\n\t\telse:\n\t\t\tself.g[vertex].append(neighbor)\n\t\tprint \"Added Edge : {}\".format(edge)",
"def insert_board_provenance(self, connections):\n if not connections:\n return\n with self.transaction() as cursor:\n cursor.executemany(\n \"\"\"\n INSERT OR IGNORE INTO boards_provenance(\n ethernet_x, ethernet_y, ip_addres)\n VALUES (?, ?, ?)\n \"\"\", ((x, y, ipaddress)\n for ((x, y), ipaddress) in connections.items()))",
"def add_edge(self, ed):\n self.edge.append(ed)\n\n\t# This one creates a new edge and adds it to the tree.",
"def test_add_edge_data(df_type):\n from cugraph.experimental import PropertyGraph\n\n transactions = dataset1[\"transactions\"]\n transactions_df = df_type(columns=transactions[0],\n data=transactions[1])\n\n pG = PropertyGraph()\n pG.add_edge_data(transactions_df,\n type_name=\"transactions\",\n vertex_col_names=(\"user_id\", \"merchant_id\"),\n property_columns=None)\n\n assert pG.num_vertices == 7\n assert pG.num_edges == 4\n expected_props = [\"merchant_id\", \"user_id\",\n \"volume\", \"time\", \"card_num\", \"card_type\"]\n assert sorted(pG.edge_property_names) == sorted(expected_props)",
"def add_edge(self, edge, edgetype=1):\n self.add_edges([edge], edgetype)",
"def add_edge(self, edge_key, edge_value):\n self.edge_list.append(edge_value)\n self.edge_dict[edge_key] = (edge_value.__len__(), self.edge_list.__len__() - 1)\n self.connection.append(edge_key)",
"def addEdge(self,edge):\r\n self.adj.append(edge)",
"def test_insertion():\n vep_headers = [\"Allele\", \"Feature_type\", \"Consequence\", \"SYMBOL\"]\n annotation = [\n \"ACC|Transcript|downstream_gene_variant|NOC2L\", \n ]\n \n vep_dict = build_vep_annotation(\n csq_info=annotation, \n reference='C', \n alternatives=['CACC'], \n vep_columns=vep_headers\n )\n \n assert vep_dict['CACC'] == [\n {'Allele': 'ACC',\n 'Consequence': 'downstream_gene_variant',\n 'Feature_type': 'Transcript',\n 'SYMBOL': 'NOC2L'\n }\n ]",
"def insert_edge(self, edge: DirectedEdge) -> None:\n for elem in self.edges:\n if elem == edge:\n raise ValueError(f'Edge id already exists in the graph.')\n\n try:\n self.insert_vertex(edge.vertex_a)\n except ValueError:\n pass\n\n try:\n self.insert_vertex(edge.vertex_b)\n except ValueError:\n pass\n\n if edge.id > self._current_highest_edge_id:\n self._current_highest_edge_id = edge.id\n self.edges.append(edge)",
"def dt_add_edge(self,e):\n a,b = self.edges[e,:2]\n\n #-#-# Try to anticipate unsafe connections \n for i in range(3): # try a few times to adjust the conflicting nodes\n constr_edges = self.check_line_is_clear(a,b)\n if len(constr_edges)>0:\n print(\"--=-=-=-=-=-= Inserting this edge %d-%d will cause an intersection -=-=-=-=-=-=-=--\"%(a,b))\n for v1,v2 in constr_edges:\n # use %s formats as values could be None\n print(\" intersects constrained edge: %s - %s\"%(self.vh_info[v1],self.vh_info[v2]))\n\n if self.verbose > 1:\n if i==0:\n self.plot(plot_title=\"About to prepare_conflicting_edges\")\n plt.plot(self.points[[a,b],0],\n self.points[[a,b],1],'m')\n\n # Debugging:\n # raise Exception(\"Stopping before trying to fix conflicting edges\")\n self.prepare_conflicting_edges(e,constr_edges)\n else:\n break\n #-#-#\n\n self.safe_insert_constraint(a,b)\n\n if a>b:\n a,b=b,a\n\n if self.verbose > 2:\n print(\" dt_add_edge: adding constraint %d->%d\"%(a,b))\n self.check()",
"def update_edge(self, e):\n orig_prop = self._g.adj.get(\n e.source_id, {}).get(\n e.target_id, {}).get(\n e.label, None)\n if not orig_prop:\n self._add_edge(e)\n return\n self._g.adj[e.source_id][e.target_id][e.label].update(e.properties)\n for prop, value in e.properties.items():\n if value is None:\n del self._g.adj[e.source_id][e.target_id][e.label][prop]",
"def append(self, edge):\n self.agenda.append(edge)\n self.total += 1",
"def add_edge(self, node1, label, node2, id=None):\n if id is None:\n id = self.make_object_id('e-', node1, label, node2)\n table = self.table\n columns = self.column_list\n stmt = f'INSERT INTO {table} ({columns}) VALUES (?,?,?,?)'\n self.store.execute(stmt, (node1, label, node2, id))",
"def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph",
"def add_edge(self, v_from, v_to):\n self.v_sources.add(v_from)\n self.v_stocks.add(v_to)\n if v_from in self.edges:\n self.edges[v_from].append(v_to)\n else:\n self.edges[v_from] = [v_to,]",
"def _connect_boundary_edges(self, D, derived_edges, overlap_edges):\n for i, edge in enumerate(derived_edges):\n if edge.layer.purpose == RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED:\n c_edge = deepcopy(edge)\n edge.external_pid = edge.id_string()\n edge.layer.purpose = RDD.PURPOSE.PORT.OUTSIDE_EDGE_ENABLED\n overlap_edges[c_edge] = [edge]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Writes the connection details retrieved from the spalloc_client job to the `boards_provenance` table.
|
def insert_board_provenance(self, connections):
if not connections:
return
with self.transaction() as cursor:
cursor.executemany(
"""
INSERT OR IGNORE INTO boards_provenance(
ethernet_x, ethernet_y, ip_addres)
VALUES (?, ?, ?)
""", ((x, y, ipaddress)
for ((x, y), ipaddress) in connections.items()))
|
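The `connections` argument is assumed to be a mapping from (ethernet_x, ethernet_y) coordinates to board IP addresses, which is the shape the executemany generator above unpacks. A self-contained sketch against an in-memory SQLite database (the column names, including the `ip_addres` spelling, follow the SQL shown above):

# Sketch of the expected `connections` shape and the equivalent bulk insert.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE boards_provenance("
    "ethernet_x INTEGER, ethernet_y INTEGER, ip_addres TEXT, "
    "UNIQUE(ethernet_x, ethernet_y, ip_addres))")

# Assumed shape: chip coordinates mapped to board IP addresses.
connections = {(0, 0): "10.11.192.1", (4, 8): "10.11.192.2"}

conn.executemany(
    "INSERT OR IGNORE INTO boards_provenance(ethernet_x, ethernet_y, ip_addres) "
    "VALUES (?, ?, ?)",
    ((x, y, ipaddress) for ((x, y), ipaddress) in connections.items()))
conn.commit()
print(conn.execute("SELECT * FROM boards_provenance").fetchall())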
[
"def save_process_table(self):\n if self.procs != None:\n f = open(self.proctable_path, 'w')\n f.write(self.processtable_header)\n for id in self.procs.keys():\n proc = self.procs[id]\n f.write(self.processtable_line % (id, proc['product'], proc['product_energy'],\n proc['time']))\n f.close()",
"def savepacs(pacout):\n if len(pacout) == 0: return\n\n print \"saving \",len(pacout),\"pacs to\",outtable\n put = conn.cursor()\n put.executemany(\n format(PAC.insert_statement(outtable, schema)),\n pacout\n )\n put.close()\n conn.commit()",
"def _propane(args):\n propane.record(args.database, args.port, args.ssl, args.username, args.password)",
"def dumpCooSchema(self):\n self.logger.log(\"Begin to dump coordinator schema...\")\n \n try:\n if (len(self.dbNodeInfo.coordinators) == 0):\n self.logger.logExit(\"There is no coordinator on local node!\")\n cooInst = self.dbNodeInfo.coordinators[0]\n \n if(os.path.exists(self.schemaCoordinatorFile)):\n try:\n os.remove(self.schemaCoordinatorFile) \n except:\n pass \n cmd = \"gs_dumpall -p %d -s --include-nodes --dump-nodes --include-buckets --dump-wrm --file=%s\" % \\\n (cooInst.port, self.schemaCoordinatorFile)\n self.logger.debug(\"Dump coordinator command:%s\" % cmd)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Dump the schema of coordinator failed!Output: %s\" % output)\n \n ips = \",\".join(cooInst.listenIps)\n cmd = \"echo \\\"ALTER NODE cn_%d WITH (HOST = '%s', HOST1 = '%s');\\\" >> %s\" % (cooInst.instanceId, ips, ips, self.schemaCoordinatorFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Append alter sql failed!Output: %s\" % output)\n self.cleanSchemaFile(\"coordinator\", self.schemaCoordinatorFile)\n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Dump coordinator schema successfully.\")",
"def publishSwarmingStatusToDb(connection, areaId, status):\n if status is True:\n inProgress = 1\n else:\n inProgress = 0\n\n cursor = connection.cursor()\n\n log.info(\"Adding swarm record to db for area %s, as %s\" % (areaId, status))\n cursor.execute(predictions_run_sql.insertSwarmingForAreaRecord, (areaId, inProgress))",
"def write(self, conf, conn):\n db = conn['warehouse']\n if self.table not in db.tables:\n if conf['ENV'] in ['development', 'testing']:\n table = db.create_table(\n self.table,\n primary_id=self.__primary_key,\n primary_type=fields.Text.column_type)\n # work around a bug whereby the table is not persisted\n table.table\n table = db[self.table]\n action = 'created'\n if table.find_one(**{self.__primary_key: self[self.__primary_key]}):\n action = 'updated'\n del self['meta_id']\n\n ensure_fields = False\n if conf['ENV'] in ['development', 'testing']:\n ensure_fields = True\n\n table.upsert(self, [self.__primary_key], ensure=ensure_fields, types=self.__column_types)\n\n logger.debug('Record - %s: %s - %s fields', action, self, len(self))",
"def property_cards_staging_update():\r\n LOG.info(\"Start: Update assessor property card staging repository.\")\r\n start_time = datetime.datetime.now()\r\n source_paths = document.repository_file_paths(path.LANE_PROPERTY_CARDS)\r\n conn = credential.UNCPathCredential(\r\n path.RLID_DATA_STAGING_SHARE, **credential.RLID_DATA_SHARE\r\n )\r\n with conn:\r\n count = Counter()\r\n for source_path in source_paths:\r\n staging_path = os.path.join(\r\n REPO_PATH[\"property-card-staging\"], os.path.basename(source_path)\r\n )\r\n if document.changed(staging_path, source_path):\r\n result_key = document.update_document(source_path, staging_path)\r\n count[result_key] += 1\r\n LOG.info(\"End: Update.\")\r\n document.log_state_counts(count, documents_type=\"property cards (staging)\")\r\n elapsed(start_time, LOG)",
"def test_jdbc_producer_insert(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = create_table_in_database(table_name, database)\n\n DATA = '\\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n pipeline = create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')\n sdc_executor.add_pipeline(pipeline.configure_for_environment(database))\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))\n sdc_executor.stop_pipeline(pipeline)\n\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)",
"def send_table_construction_notification(self):\n\n send_data(\"tables-done\", self.root_connection)",
"def store_all_to_database(self, session):\n\n description = 'Campus Property Management was founded in 1967 by Champaign resident and University of Illinois alumnus Erwin Goldfarb. Recognizing the need for expanded housing around the University, Erwin decided to start his own leasing company. Starting off with just one building, Erwin took pride in providing the best customer service to his tenants - even going so far as to lay carpet and painting apartments himself! Growing steadily through the years, the company built its first building from start to finish in 1983 and officially became Campus Property Management in 1988. Since those early years, CPM has grown to include 1,850 apartments which are home to 4,500 tenants each year! As our business continues to grow, we remain committed to our core values of integrity, commitment, innovation, opportunity and service. We are dedicated to providing comfortable and affordable housing with great customer service while continuing to be a proud part of the Illini community and giving back whenever we can!'\n\n # Insert a CPM company instance into the database\n current_company = Company(\n name='CPM',\n baseurl = 'http://www.cpm-apts.com/',\n description = description\n )\n session.add(current_company)\n\n # Iterate over the apartments, storing each in the database\n for apartment in self.apartment_data:\n logging.info(\"Inserting %s to database\", apartment['name'])\n new_apartment = Apartment(\n company=current_company,\n url=apartment['url'],\n name=apartment['name'],\n bedrooms=apartment['bedrooms'],\n bathrooms=apartment['bathrooms'],\n price=apartment['price'],\n leasing_period=apartment['leasing_period'],\n description=apartment['description'],\n address=apartment['address'],\n lat=apartment['lat'],\n lng=apartment['lng']\n )\n session.add(new_apartment)\n\n # Insert images for the given apartment\n for index, image_url in enumerate(apartment['image_urls']):\n new_image = Image(\n url=image_url,\n apartment_id=new_apartment.id,\n type=0,\n image_index=index\n )\n session.add(new_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_image)\n\n # Insert floorplan image, if it exists\n if apartment['floorplan_url'] != 0:\n new_floorplan_image = Image(\n url=apartment['floorplan_url'],\n apartment_id=new_apartment.id,\n type=1,\n image_index=len(apartment['image_urls'])\n )\n session.add(new_floorplan_image)\n\n # Connect floorplan to apartment\n new_apartment.images.append(new_floorplan_image)\n\n # Insert amenities for the given apartment\n for amenity in apartment['amenities']:\n new_amenity = Amenity(\n apartment_id=new_apartment.id,\n amenity=amenity\n )\n session.add(new_amenity)\n\n # Connect amenity to apartment\n new_apartment.amenities.append(new_amenity)\n # Write all the queries to database\n session.commit()",
"def test_jdbc_producer_update(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = create_table_in_database(table_name, database)\n logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)\n connection = database.engine.connect()\n connection.execute(table.insert(), ROWS_IN_DATABASE)\n\n DATA = '\\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n pipeline = create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Update', DATA, table_name, 'UPDATE')\n sdc_executor.add_pipeline(pipeline.configure_for_environment(database))\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))\n sdc_executor.stop_pipeline(pipeline)\n\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n updated_names = {record['id']: record['name'] for record in ROWS_IN_DATABASE}\n updated_names.update({record['id']: record['name'] for record in ROWS_TO_UPDATE})\n assert data_from_database == [(updated_names[record['id']], record['id']) for record in ROWS_IN_DATABASE]\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)",
"def write(self):\n\n if endpoints[self.endpoint]['on']:\n if endpoints[self.endpoint]['re_upload']:\n # NB: please ensure the table exists in the dataset, or this will fail\n self.bgq_obj.delete_table() # if re-upload is on, delete the table.\n\n # Overwrite with sorted list\n # (little hack - takes longest lines first therefore most likely to contain all sub-fields):\n with open(self.endpoint+'.json') as f:\n content = f.readlines()\n\n sorted_list = sorted(content, key=len, reverse=True)\n\n try:\n os.remove(self.endpoint+\".json\")\n except OSError:\n pass\n\n for i in range(len(sorted_list)):\n with open(self.endpoint+'.json', 'a') as outfile:\n json.dump(json.loads(sorted_list[i]), outfile)\n outfile.write('\\n')\n\n self.bgq_obj.local_json_bigquery(self.endpoint+\".json\", autodetect=False, schema=self.table_schema)",
"def writeToMySQL(self, connection):\n pass # TODO -- write",
"def dump_snp_positions(snp_tab, chrm_id, pos_out): \n for (pos, ref, alt) in snp_tab:\n pos_out.write('%s\\t%s\\n' % (chrm_id, pos))\n return",
"def saveAllocString2Setup(self, setup):\n setup.qalloc_string = f\"Global rho: {1. / self.global_scale ** 2}\\n\" \\\n f\"Global epsilon: {self.total_budget}\\n\" \\\n f\"delta: {self.delta}\\n\" \\\n \"Geolevel allocations:\\n\" + \\\n str([f\"{k}: {str(v)}\" for k, v in self.geolevel_prop_budgets_dict.items()]) + \\\n \"\\nWithin-geolevel query allocations:\\n\" + str(self.query_budget.allocation_df.to_csv())",
"def write_table(self):\n with open(self.table_file, 'w') as table_f:\n pickle.dump(self.pollster_table, table_f)",
"def save_toga_info_tab(out_dir, projections_list, proj_to_q_coords,\n ref_trans_to_region, proj_to_chain_score,\n proj_to_chain_features, projection_to_loss_class):\n toga_info_tab_path = os.path.join(out_dir, \"togaInfo.tab\")\n print(\"Saving TOGAInfo tab file\")\n f = open(toga_info_tab_path, \"w\")\n for projection in projections_list:\n trans, chain = split_proj_name(projection)\n glp_class = projection_to_loss_class[projection]\n # default 0.5 for fragmented assemblies\n chain_score = proj_to_chain_score.get(projection, 0.5)\n query_region = proj_to_q_coords[projection]\n ref_region = ref_trans_to_region[trans]\n chain_feats = proj_to_chain_features.get(projection, FRAGM_FEATS)\n # parse chain ml features\n synteny = chain_feats[0]\n flank = chain_feats[1]\n gl_exo = chain_feats[2]\n loc_exo = chain_feats[3]\n exon_cov = chain_feats[4]\n intr_cov = chain_feats[5]\n tab_row = (projection, trans, ref_region, query_region, chain_score,\n synteny, flank, gl_exo, loc_exo, exon_cov, intr_cov, glp_class)\n tab_strs_ = map(str, tab_row)\n f.write(\"\\t\".join(tab_strs_))\n f.write(\"\\n\")\n print(f\"Saved togaInfo tab at {toga_info_tab_path}\")\n f.close()",
"def produce(self):\n for network in self.subnets:\n output = ''\n for address in self.network.addresses():\n if ipaddress.ip_address(address.ip) in network:\n for entry in address.ns_entries.filter(type='PTR'):\n reversed_ip = ipaddress.ip_address(address.ip).reverse_pointer\n output += '{}. IN {} {}.{}. ; {}\\n'.format(reversed_ip, entry.type,\n entry.name,\n entry.domain.name,\n address.creation_date)\n filename = '{}/{}.db'.format(self.directory,\n str(network.network_address).replace(':', '.'))\n with open(filename, 'w') as lock_file:\n locks.lock(lock_file, locks.LOCK_EX)\n lock_file.write(output)\n lock_file.close()\n self.update_soa()",
"def log_message(dbconn,msg_dict):\n dbconn.insert('messages',backend_id=msg_dict['backend_id'], msg_out=msg_dict['msg_out'],\n status_out=msg_dict['status_out'], destination=msg_dict['destination'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
THIS IS A TESTING METHOD. It locks the database and then attempts to log while the lock is held.
|
def _test_log_locked(self, text):
with self.transaction() as cur:
# lock the database
cur.execute(
"""
INSERT INTO reports(message)
VALUES(?)
""", [text])
cur.lastrowid # pylint: disable=pointless-statement
# try logging and storing while locked.
logger.warning(text)
|
[
"def db_lock_action(self): # pragma: no cover\n pass",
"def test_default_connection_details_value():\n RedLock(\"test_simple_lock\")",
"def test_lock_account_user(self):\n pass",
"def mustlock(self):\n pass",
"def realopen(self,ro=False):\n\n#\t\tprint \"open \",self.name\n\n\t\tglobal DBDEBUG\n\t\tif DBDEBUG:\n\t\t\twhile not self.lock.acquire(False) :\n\t\t\t\tprint\"DB %s locked. Waiting\"%self.name\n\t\t\t\ttime.sleep(1)\n\t\telse : self.lock.acquire()\n\t\tself.lasttime=time.time()\n\t\tif self.bdb!=None :\n\t\t\tif ro==True or self.isro==False :\n\t\t\t\tif DBDEBUG : print \"already open\",self.name\n\t\t\t\tself.lock.release()\n\t\t\t\treturn \t\t# return if the database is already open and in a compatible read-only mode\n\t\t\tif DBDEBUG : print \"reopening R/W \",self.name\n\t\t\tself.lock.release()\n\t\t\tself.close()\t# we need to reopen read-write\n\t\t\tself.lock.acquire()\n\n\t\t#if DBDEBUG:\n\t\t\t## look at the locking subsystem stats\n\t\t\t#ls=self.dbenv.lock_stat()\n\t\t\t#print \"lock_stat:\\t\",\n\t\t\t#for i in (\"nlocks\",\"maxlocks\",\"nlockers\",\"maxlockers\",\"nobjects\",\"maxobjects\",\"maxnlocks\"): print ls[i],\"\\t\",\n\t\t\t#print\n\n\n\t\tself.bdb=db.DB(self.dbenv)\t\t# we don't check BDB_CACHE_DISABLE here, since self.dbenv will already be None if its set\n\t\tif self.file==None : lfile=self.name+\".bdb\"\n\t\telse : lfile=self.file\n#\t\tprint \"open \",self.path+\"/\"+file,self.name,ro\n\t\tif ro :\n\t\t\ttry:\n\t\t\t\tself.bdb.open(self.path+\"/\"+lfile,self.name,db.DB_BTREE,db.DB_RDONLY|db.DB_THREAD)\n\t\t\texcept db.DBInvalidArgError:\n\t\t\t\tself.updateold(lfile,ro)\n\t\t\texcept db.DBNoSuchFileError:\n\t\t\t\tself.bdb=None\n\t\t\t\tself.lock.release()\n\t\t\t\tif DBDEBUG : traceback.print_exc()\n\t\t\t\traise Exception, \"Cannot open or find %s\"%self.name\n\n\t\t\t#except:\n\t\t\t\t## try one more time... this shouldn't be necessary...\n\t\t\t\t#time.sleep(1)\n##\t\t\t\ttry:\n\t\t\t\t#self.bdb.open(self.path+\"/\"+file,self.name,db.DB_BTREE,db.DB_RDONLY|db.DB_THREAD)\n##\t\t\t\texcept:\n##\t\t\t\t\traise Exception,\"Cannot open database : %s\"%self.path+\"/\"+file\n\t\t\tself.isro=True\n\t\telse :\n\t\t\ttry:\n\t\t\t\tself.bdb.open(self.path+\"/\"+lfile,self.name,db.DB_BTREE,db.DB_CREATE|db.DB_THREAD)\n\t\t\texcept db.DBInvalidArgError:\n\t\t\t\tself.updateold(lfile,ro)\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\tos.makedirs(\"%s/EMAN2DB\"%self.path)\n\t\t\t\t\tself.bdb=db.DB(self.dbenv)\n\t\t\t\t\tself.bdb.open(self.path+\"/\"+lfile,self.name,db.DB_BTREE,db.DB_CREATE|db.DB_THREAD)\n\t\t\t\texcept :\n\t\t\t\t\tself.bdb=None\n\t\t\t\t\tself.lock.release()\n\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\tprint \"Unable to open read/write %s (%s/%s)\"%(self.name,self.path,lfile)\n\t\t\t\t\treturn\n\t\t\t#except:\n\t\t\t\t## try one more time... this shouldn't be necessary...\n\t\t\t\t#time.sleep(1)\n\t\t\t\t#try:\n\t\t\t\t\t#self.bdb.open(self.path+\"/\"+file,self.name,db.DB_BTREE,db.DB_CREATE|db.DB_THREAD)\n\t\t\t\t#except:\n\t\t\t\t\t#raise Exception,\"Cannot create database : %s\"%self.path+\"/\"+file\n\t\t\tself.isro=False\n\n\t\tself.opencount+=1\t# how many times we have had to reopen this database\n\t\tDBDict.nopen+=1\n\t\tself.lock.release()\n\n\t\tglobal MAXOPEN\n\t\tif DBDict.nopen>MAXOPEN : self.close_one()\n\n\t\tif DBDEBUG : print \"Opened \",self.name",
"def test_wait_for_db_read(self):\n # if retrieves operational error then db isn't available\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n # means whenever django.db.utils.ConnectionHandles.__getitem__\n # is called during test, instead of performing the behaviour\n # replace with mock object and return True\n gi.return_velue = True\n call_command('wait_for_db')\n self.assertEqual(gi.call_count, 1)",
"def steal_test_lock(self, test_uuid):",
"def lock(self):\n self.locked = True",
"def load_locks(self):\n self.db_locks = MongoClient().test_database.db.locks\n # drop db for testing, will not be in deployed version\n self.db_locks.drop()\n # print(self.db_locks)\n return True",
"def testRetryWriteWarningInLog(self):\n\n class FakeTransactionManager(object):\n\n def commit(self):\n self.committed = True\n\n def abort(self):\n self.aborted = True\n\n self.flag = False\n\n def function():\n if not self.flag:\n self.flag = True\n raise psycopg2.Error('Error')\n return 'success'\n\n manager = FakeTransactionManager()\n transact = Transact(FakeThreadPool(), manager, lambda seconds: None)\n result = yield transact.run(function)\n self.assertEqual('success', result)\n self.assertTrue(manager.aborted)\n self.assertTrue(manager.committed)\n self.assertIn('Retrying a transaction', self.log.getvalue())",
"def __lock_catalog(self):\n\n # XXX need filesystem lock too?\n self.__lock.acquire()",
"def _lock_check(self):\n if self.check_status({\"queued\", \"running\", \"public\", \"completed\",\n \"error\"}):\n raise QiitaDBStatusError(\"Analysis is locked!\")",
"def test_staleness_single_use_readonly_autocommit(self):\n timestamp = datetime.datetime(2021, 9, 20)\n\n connection = self._make_connection()\n connection.autocommit = True\n connection.read_only = True\n connection._session_checkout = mock.MagicMock(autospec=True)\n\n connection.staleness = {\"read_timestamp\": timestamp}\n\n # mock snapshot context manager\n snapshot_obj = mock.Mock()\n snapshot_obj.execute_sql = mock.Mock(return_value=[1])\n\n snapshot_ctx = mock.Mock()\n snapshot_ctx.__enter__ = mock.Mock(return_value=snapshot_obj)\n snapshot_ctx.__exit__ = exit_ctx_func\n snapshot_method = mock.Mock(return_value=snapshot_ctx)\n\n connection.database.snapshot = snapshot_method\n\n cursor = connection.cursor()\n cursor.execute(\"SELECT 1\")\n\n connection.database.snapshot.assert_called_with(read_timestamp=timestamp)",
"def test_simple_lock():\n lock = RedLock(\"test_simple_lock\", [{\"host\": \"localhost\"}], ttl=1000)\n locked = lock.acquire()\n lock.release()\n assert locked is True",
"def get_db_lock():\n global DB_LOCK\n DB_LOCK = DB_LOCK or Lock()\n return DB_LOCK",
"def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n first.acquire(\"A\")\n with pytest.raises(Timeout):\n with lock(BASE_URL, \"A\", timeout=timedelta(seconds=1)):\n pass",
"def lock(self) -> None:\n self._locked = True",
"def handle_lock(table):\n lock = Lock(table)\n\n if not hasattr(context, 'locks'):\n context.locks = []\n\n # If locking the same table lock is already registered\n if len(context.locks) > 0 and context.locks[-1].table == lock.table:\n return\n\n # Registering locked table\n context.locks.append(lock)\n\n # Nothing to check if only one table locked\n if len(context.locks) == 1:\n return\n\n # Checking lock transition is allowed\n transition = (context.locks[-2].table, context.locks[-1].table)\n if transition not in ALLOWED_LOCKS_CHAINS:\n raise LockTransitionNotAllowedError()",
"def POST_lock(self, thing):\n if thing.archived_slow:\n return abort(400, \"Bad Request\")\n VNotInTimeout().run(action_name=\"lock\", target=thing)\n thing.locked = True\n thing._commit()\n\n ModAction.create(thing.subreddit_slow, c.user, target=thing,\n action='lock')\n \n \n # NEEDS ADD\n # TIMER\n #LINK TO BUMP TEXT"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Path to the archive file. Because the archive file path contains the runtime, its use can cause a race condition or recursion error in some locations. If the runtime were removed from the path, there would be no way to track changes to the runtime, which matters more than having to be mindful of where this property is used.
|
def archive_file(self) -> Path:
return self.project.build_directory / (
f"{self.project.source_code.root_directory.name}."
+ ("layer." if self.usage_type == "layer" else "")
+ f"{self.runtime}.{self.project.source_code.md5_hash}.zip"
)
|
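A worked example of the name this property composes; the attribute values below are placeholders, only the composition mirrors the code above (note how both the runtime and the source md5 hash appear in the file name, so a change to either produces a different archive path):

# All values are placeholders standing in for the project attributes used above.
from pathlib import Path

build_directory = Path("/tmp/build")
root_name = "my_function"        # project.source_code.root_directory.name
usage_type = "layer"             # or "function"
runtime = "python3.9"            # kept in the path so runtime changes are tracked
md5_hash = "d41d8cd98f00b204e9800998ecf8427e"  # project.source_code.md5_hash

archive_file = build_directory / (
    f"{root_name}."
    + ("layer." if usage_type == "layer" else "")
    + f"{runtime}.{md5_hash}.zip"
)
print(archive_file)
# /tmp/build/my_function.layer.python3.9.d41d8cd98f00b204e9800998ecf8427e.zip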
[
"def _archive_path(tm_env, archive_type, instance, uniq):\n return os.path.join(tm_env.archives_dir, '%s-%s.%s.tar.gz' %\n (instance.replace('#', '-'), uniq, archive_type))",
"def getArchivePathFor(weblogentry):",
"def get_archive_install_dir(self) -> Path:\n # Prioritizes archive's install base dir over parent's install base dir\n install_base = self.arch_target_install_base or self.parent_target_install_base\n # Join archive's install dir to install base dir\n return Path(install_base.strip(r\"\\/\"), self.arch_target_install_dir.strip(r\"\\/\"))",
"def lib_path(self) -> str:\n return \"@{}//:pkg\".format(self.archive_name)",
"def get_output_file_path(self):\n zip_filename = \"%s.%s_%s.wotmod\" % (\n self.author_id, self.mod_id, self.mod_version)\n return os.path.abspath(os.path.join(self.dist_dir, zip_filename))",
"def get_real_path(self):\n return os.path.join(self.root.path, self.path, self.filename)",
"def get_abs_path(self, path):\n return self.file_dict[path]",
"def _add_link_to_archive_from_specdir_if_needed(self, archive_path: Path) -> None:\n if archive_path.parent.absolute() != self.absolute_specfile_dir:\n archive_in_spec_dir = self.absolute_specfile_dir / archive_path.name\n relative_archive_path = archive_path.relative_to(self.absolute_specfile_dir)\n\n logger.info(\n \"Linking to the specfile directory:\"\n f\" {archive_in_spec_dir} -> {relative_archive_path}\"\n f\" (absolute path to archive: {archive_path})\"\n )\n archive_in_spec_dir.symlink_to(relative_archive_path)",
"def _get_os_path(self, path):\n return to_os_path(path, self.root_dir)",
"def get_full_path(self):\r\n comp_dir_attr = self.attributes.get('DW_AT_comp_dir', None)\r\n comp_dir = bytes2str(comp_dir_attr.value) if comp_dir_attr else ''\r\n fname_attr = self.attributes.get('DW_AT_name', None)\r\n fname = bytes2str(fname_attr.value) if fname_attr else ''\r\n return os.path.join(comp_dir, fname)",
"def relPath(self):\n return os.path.join(self.relLoc, self.fileName)",
"def abspath(self):\r\n if self._abspath is None:\r\n self._abspath = os.path.abspath(self.path)\r\n return self._abspath",
"def manifest_path(self):\n return self.full_path(MANIFEST_FILENAME)",
"def mapPath(self, fsPathString):\n za = ZipArchive(self.importer.archive)\n myPath = FilePath(self.importer.archive)\n itsPath = FilePath(fsPathString)\n if myPath == itsPath:\n return za\n # This is NOT a general-purpose rule for sys.path or __file__:\n # zipimport specifically uses regular OS path syntax in its\n # pathnames, even though zip files specify that slashes are always\n # the separator, regardless of platform.\n segs = itsPath.segmentsFrom(myPath)\n zp = za\n for seg in segs:\n zp = zp.child(seg)\n return zp",
"def _version_path(self) -> str:",
"def align_path(self) -> Path:\n return self.working_directory.joinpath(f\"{self.data_source_identifier}.align.fst\")",
"def get_relative_pathname(self):\n return os.path.join(Syllabus.SYLLABUS_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)",
"def test_generate_path_w_blank_archive_dir(self):\n\n with pytest.raises(NameError):\n sami2py.archive_dir = ''\n sami2py.utils.generate_path(tag='test', lon=0, year=2012, day=277)\n\n return",
"def transform_path():\n return str(pathlib.Path(__file__).parent.absolute())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SHA-256 of the archive file, returned as a base64-encoded digest.
|
def code_sha256(self) -> str:
file_hash = FileHash(hashlib.sha256())
file_hash.add_file(self.archive_file)
return base64.b64encode(file_hash.digest).decode()
|
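An equivalent, dependency-free sketch of the same computation, assuming the `FileHash` helper simply hashes the file's bytes; the result is the raw SHA-256 digest encoded as base64 rather than hex:

# Sketch only: stream the archive in blocks, then base64-encode the raw digest.
import base64
import hashlib
from pathlib import Path

def code_sha256(archive_file: Path) -> str:
    digest = hashlib.sha256()
    with open(archive_file, "rb") as stream:
        for chunk in iter(lambda: stream.read(65536), b""):  # 64 KiB blocks
            digest.update(chunk)
    return base64.b64encode(digest.digest()).decode()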
[
"def compute_sha256(self, file):\n if file:\n m = hashlib.sha256()\n m.update(file)\n return m.hexdigest()\n return 0",
"def _hash_file_sha256(directory: str, path: str) -> str:\n path = os.path.join(directory, path)\n hash_obj = hashlib.sha256()\n with open(path, \"rb\") as f:\n hash_obj.update(f.read())\n return hash_obj.hexdigest()",
"def GetFileSha256(file_path):\n return base64.b64encode(GetFileHashes(file_path, do_sha256=True)['sha256'])",
"async def __get_sha256(self, data):\n\n m = hashlib.sha256()\n m.update(data)\n return m.hexdigest()",
"def _sha256_digest(file_path) -> str:\n hash = hashlib.sha256()\n buffer = bytearray(hash.block_size * 1024) # Attempts to read in multiples of the hash block size (64KB).\n mv = memoryview(buffer)\n with open(file_path, \"rb\", buffering=0) as f:\n for bytes_read in iter(lambda: f.readinto(mv), 0):\n hash.update(mv[:bytes_read])\n return hash.hexdigest()",
"def get_file_hash(self, file_path):\n _log.info(\"Retrieving SHA256 hash of file '%s'...\" % file_path)\n answer = self._execute_command(_FilesystemFunction.HASH, file_path)\n parts = answer.split(_ANSWER_SHA256)\n if len(parts) <= 1:\n raise FileSystemException(_ERROR_EXECUTE_COMMAND %\n ((_COMMAND_ATFS % (_FilesystemFunction.HASH.command %\n file_path)).replace(\"\\r\", \"\"), \"Invalid hash received\"))\n\n return str.strip(parts[1])",
"def _update_sha256(filename, sha256):\n block_size = 64 * 1024 # 64 KB\n with open(filename, 'rb') as input_file:\n while True:\n data = input_file.read(block_size)\n if not data:\n break\n sha256.update(data)\n sha256.update(filename.encode(\"utf-8\"))\n return sha256",
"def hash_of_file(file_name):\n\ttry:\n\t\thasher=hashlib.sha256()\n\t\twith open(file_name, 'rb') as fp:\n\t\t\thasher.update(fp.read())\n\t\t\tprint(file_name,hasher.hexdigest())\n\t\tdel hasher\n\texcept Exception as e:\n\t\tprint(e)\n\t\tsys.exit(0)",
"def filehash(self):\n # This is lazily evaluated as we can be sure that we can always\n # calculate it (unless the FS itself is unreadable)\n if self._filehash is None:\n s = hashlib.sha256()\n with self.wheel_file.open('rb') as f:\n while True:\n buf = f.read(65536)\n if buf:\n s.update(buf)\n else:\n break\n self._filehash = s.hexdigest().lower()\n return self._filehash",
"async def sha256cmd(self, message):\r\n\t\tawait hashing(message, 3)",
"def get_hash(self):\r\n path = self.files[self.idx_image]\r\n filename = path.split(\"/\")[-1]\r\n with open(path,\"rb\") as f:\r\n hash_object = hashlib.sha512(f.read())\r\n hex_dig = hash_object.hexdigest()\r\n hash = filename + \" \"+ hex_dig\r\n return hash",
"def file_hash(path):\n fp = open_(path, \"rb\")\n h = hashlib.sha256()\n while True:\n data = ossafe(fp.read, \"can't read: %s\" % path, 2**18)\n if (len(data) == 0):\n break # EOF\n h.update(data)\n ossafe(fp.close, \"can't close: %s\" % path)\n return h.hexdigest()",
"def hash_file(self, bucket: str, key: str, **kwargs) -> str:\n stream = throttled_call(self.s3.get_object, Bucket=bucket, Key=key, **kwargs)[\n \"Body\"\n ]\n\n # 5 megabytes\n block_size = 5242880\n\n hasher = hashlib.sha256()\n buffer = stream.read(block_size)\n\n while buffer:\n hasher.update(buffer)\n buffer = stream.read(block_size)\n\n return hasher.hexdigest()",
"def fetch_tarball_sha256(url):\n logging.info(\"Fetching tarball from {}...\".format(url))\n response = requests.get(url, stream=True)\n sha256 = hashlib.sha256()\n for chunk in response.iter_content(chunk_size=1024 * 1024):\n sha256.update(chunk)\n hex_hash = sha256.hexdigest()\n logging.info(\"Downloaded {} with hash {}\".format(url, hex_hash))\n return hex_hash",
"def getScreenshotSha256(self, oFile):\n (oFile, _, _) = self.oTestSet.openFile(oFile.sFile, 'rb');\n try:\n abImageFile = oFile.read();\n except Exception as oXcpt:\n self.oSheriff.vprint(u'Error reading the \"%s\" image file: %s' % (oFile.sFile, oXcpt,))\n else:\n try:\n oImage = Image.open(StringIO.StringIO(abImageFile));\n except Exception as oXcpt:\n self.oSheriff.vprint(u'Error opening the \"%s\" image bytes using PIL.Image.open: %s' % (oFile.sFile, oXcpt,))\n else:\n try:\n oHash = hashlib.sha256();\n oHash.update(oImage.tostring());\n except Exception as oXcpt:\n self.oSheriff.vprint(u'Error hashing the uncompressed image bytes for \"%s\": %s' % (oFile.sFile, oXcpt,))\n else:\n return oHash.hexdigest();\n return None;",
"def hash(bytes):\n return unpack(sha256(bytes).digest())",
"def checksum(self, fileName):\n\n tar = tarfile.open(fileName, mode='r')\n lsl = [(x.name, int(x.size), int(x.mtime), x.uname) for x in tar.getmembers()]\n hasher = hashlib.sha256(str(lsl))\n checksum = hasher.hexdigest()\n\n return checksum",
"def hash(self):\n return sha256_proto(self.block.block_header)",
"def sha(self):\n return self._commit.hexsha"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List of compatible instruction set architectures.
|
def compatible_architectures(self) -> Optional[List[str]]:
return self.project.compatible_architectures
|
[
"def supported_archs(self):\n return self.SUPPORTED_ARCHS",
"def get_available_architectures(self):\n query = \"select distinct architecture from packages where architecture != 'all'\"\n\n #just check if any of the rows retured is empty\n return [ arch[0] for arch in self.__execute_query(query) if arch[0] ]",
"def get_machine_arch(self):\n architectures = {\n 'EM_M32' : 'AT&T WE 32100',\n 'EM_SPARC' : 'SPARC',\n 'EM_386' : 'x86',\n 'EM_68K' : 'Motorola 68000',\n 'EM_88K' : 'Motorola 88000',\n 'EM_IAMCU' : 'Intel MCU',\n 'EM_860' : 'Intel 80860',\n 'EM_MIPS' : 'MIPS',\n 'EM_S370' : 'IBM System/370',\n 'EM_MIPS_RS3_LE' : 'MIPS RS3000 Little-endian',\n 'EM_PARISC' : 'Hewlett-Packard PA-RISC',\n 'EM_VPP500' : 'Fujitsu VPP500',\n 'EM_SPARC32PLUS' : 'Enhanced SPARC',\n 'EM_960' : 'Intel 80960',\n 'EM_PPC' : 'PowerPC',\n 'EM_PPC64' : '64-bit PowerPC',\n 'EM_S390' : 'IBM System/390',\n 'EM_SPU' : 'IBM SPU/SPC',\n 'EM_V800' : 'NEC V800',\n 'EM_FR20' : 'Fujitsu FR20',\n 'EM_RH32' : 'TRW RH-32',\n 'EM_RCE' : 'Motorola RCE',\n 'EM_ARM' : 'ARM',\n 'EM_ALPHA' : 'Digital Alpha',\n 'EM_SH' : 'Hitachi SH',\n 'EM_SPARCV9' : 'SPARC Version 9',\n 'EM_TRICORE' : 'Siemens TriCore embedded processor',\n 'EM_ARC' : 'Argonaut RISC Core, Argonaut Technologies Inc.',\n 'EM_H8_300' : 'Hitachi H8/300',\n 'EM_H8_300H' : 'Hitachi H8/300H',\n 'EM_H8S' : 'Hitachi H8S',\n 'EM_H8_500' : 'Hitachi H8/500',\n 'EM_IA_64' : 'Intel IA-64',\n 'EM_MIPS_X' : 'MIPS-X',\n 'EM_COLDFIRE' : 'Motorola ColdFire',\n 'EM_68HC12' : 'Motorola M68HC12',\n 'EM_MMA' : 'Fujitsu MMA',\n 'EM_PCP' : 'Siemens PCP',\n 'EM_NCPU' : 'Sony nCPU',\n 'EM_NDR1' : 'Denso NDR1',\n 'EM_STARCORE' : 'Motorola Star*Core',\n 'EM_ME16' : 'Toyota ME16',\n 'EM_ST100' : 'STMicroelectronics ST100',\n 'EM_TINYJ' : 'Advanced Logic TinyJ',\n 'EM_X86_64' : 'x64',\n 'EM_PDSP' : 'Sony DSP',\n 'EM_PDP10' : 'Digital Equipment PDP-10',\n 'EM_PDP11' : 'Digital Equipment PDP-11',\n 'EM_FX66' : 'Siemens FX66',\n 'EM_ST9PLUS' : 'STMicroelectronics ST9+ 8/16 bit',\n 'EM_ST7' : 'STMicroelectronics ST7 8-bit',\n 'EM_68HC16' : 'Motorola MC68HC16',\n 'EM_68HC11' : 'Motorola MC68HC11',\n 'EM_68HC08' : 'Motorola MC68HC08',\n 'EM_68HC05' : 'Motorola MC68HC05',\n 'EM_SVX' : 'Silicon Graphics SVx',\n 'EM_ST19' : 'STMicroelectronics ST19 8-bit',\n 'EM_VAX' : 'Digital VAX',\n 'EM_CRIS' : 'Axis Communications 32-bit',\n 'EM_JAVELIN' : 'Infineon Technologies 32-bit',\n 'EM_FIREPATH' : 'Element 14 64-bit DSP',\n 'EM_ZSP' : 'LSI Logic 16-bit DSP',\n 'EM_MMIX' : 'Donald Knuth\\'s educational 64-bit',\n 'EM_HUANY' : 'Harvard University machine-independent object files',\n 'EM_PRISM' : 'SiTera Prism',\n 'EM_AVR' : 'Atmel AVR 8-bit',\n 'EM_FR30' : 'Fujitsu FR30',\n 'EM_D10V' : 'Mitsubishi D10V',\n 'EM_D30V' : 'Mitsubishi D30V',\n 'EM_V850' : 'NEC v850',\n 'EM_M32R' : 'Mitsubishi M32R',\n 'EM_MN10300' : 'Matsushita MN10300',\n 'EM_MN10200' : 'Matsushita MN10200',\n 'EM_PJ' : 'picoJava',\n 'EM_OPENRISC' : 'OpenRISC 32-bit',\n 'EM_ARC_COMPACT' : 'ARC International ARCompact',\n 'EM_XTENSA' : 'Tensilica Xtensa',\n 'EM_VIDEOCORE' : 'Alphamosaic VideoCore',\n 'EM_TMM_GPP' : 'Thompson Multimedia',\n 'EM_NS32K' : 'National Semiconductor 32000 series',\n 'EM_TPC' : 'Tenor Network TPC',\n 'EM_SNP1K' : 'Trebia SNP 1000',\n 'EM_ST200' : 'STMicroelectronics ST200',\n 'EM_IP2K' : 'Ubicom IP2xxx',\n 'EM_MAX' : 'MAX',\n 'EM_CR' : 'National Semiconductor CompactRISC',\n 'EM_F2MC16' : 'Fujitsu F2MC16',\n 'EM_MSP430' : 'Texas Instruments msp430',\n 'EM_BLACKFIN' : 'Analog Devices Blackfin',\n 'EM_SE_C33' : 'Seiko Epson S1C33',\n 'EM_SEP' : 'Sharp',\n 'EM_ARCA' : 'Arca RISC',\n 'EM_UNICORE' : 'PKU-Unity MPRC',\n 'EM_EXCESS' : 'eXcess',\n 'EM_DXP' : 'Icera Semiconductor Deep Execution Processor',\n 'EM_ALTERA_NIOS2' : 'Altera Nios II',\n 'EM_CRX' : 'National Semiconductor CompactRISC CRX',\n 'EM_XGATE' : 'Motorola XGATE',\n 
'EM_C166' : 'Infineon C16x/XC16x',\n 'EM_M16C' : 'Renesas M16C',\n 'EM_DSPIC30F' : 'Microchip Technology dsPIC30F',\n 'EM_CE' : 'Freescale Communication Engine RISC core',\n 'EM_M32C' : 'Renesas M32C',\n 'EM_TSK3000' : 'Altium TSK3000',\n 'EM_RS08' : 'Freescale RS08',\n 'EM_SHARC' : 'Analog Devices SHARC',\n 'EM_ECOG2' : 'Cyan Technology eCOG2',\n 'EM_SCORE7' : 'Sunplus S+core7 RISC',\n 'EM_DSP24' : 'New Japan Radio (NJR) 24-bit DSP',\n 'EM_VIDEOCORE3' : 'Broadcom VideoCore III',\n 'EM_LATTICEMICO32' : 'Lattice FPGA RISC',\n 'EM_SE_C17' : 'Seiko Epson C17',\n 'EM_TI_C6000' : 'TI TMS320C6000',\n 'EM_TI_C2000' : 'TI TMS320C2000',\n 'EM_TI_C5500' : 'TI TMS320C55x',\n 'EM_TI_ARP32' : 'TI Application Specific RISC, 32bit',\n 'EM_TI_PRU' : 'TI Programmable Realtime Unit',\n 'EM_MMDSP_PLUS' : 'STMicroelectronics 64bit VLIW',\n 'EM_CYPRESS_M8C' : 'Cypress M8C',\n 'EM_R32C' : 'Renesas R32C',\n 'EM_TRIMEDIA' : 'NXP Semiconductors TriMedia',\n 'EM_QDSP6' : 'QUALCOMM DSP6',\n 'EM_8051' : 'Intel 8051',\n 'EM_STXP7X' : 'STMicroelectronics STxP7x',\n 'EM_NDS32' : 'Andes Technology RISC',\n 'EM_ECOG1' : 'Cyan Technology eCOG1X',\n 'EM_ECOG1X' : 'Cyan Technology eCOG1X',\n 'EM_MAXQ30' : 'Dallas Semiconductor MAXQ30',\n 'EM_XIMO16' : 'New Japan Radio (NJR) 16-bit',\n 'EM_MANIK' : 'M2000 Reconfigurable RISC',\n 'EM_CRAYNV2' : 'Cray Inc. NV2',\n 'EM_RX' : 'Renesas RX',\n 'EM_METAG' : 'Imagination Technologies META',\n 'EM_MCST_ELBRUS' : 'MCST Elbrus',\n 'EM_ECOG16' : 'Cyan Technology eCOG16',\n 'EM_CR16' : 'National Semiconductor CompactRISC CR16 16-bit',\n 'EM_ETPU' : 'Freescale',\n 'EM_SLE9X' : 'Infineon Technologies SLE9X',\n 'EM_L10M' : 'Intel L10M',\n 'EM_K10M' : 'Intel K10M',\n 'EM_AARCH64' : 'AArch64',\n 'EM_AVR32' : 'Atmel 32-bit',\n 'EM_STM8' : 'STMicroeletronics STM8 8-bit',\n 'EM_TILE64' : 'Tilera TILE64',\n 'EM_TILEPRO' : 'Tilera TILEPro',\n 'EM_MICROBLAZE' : 'Xilinx MicroBlaze 32-bit RISC',\n 'EM_CUDA' : 'NVIDIA CUDA',\n 'EM_TILEGX' : 'Tilera TILE-Gx',\n 'EM_CLOUDSHIELD' : 'CloudShield',\n 'EM_COREA_1ST' : 'KIPO-KAIST Core-A 1st generation',\n 'EM_COREA_2ND' : 'KIPO-KAIST Core-A 2nd generation',\n 'EM_ARC_COMPACT2' : 'Synopsys ARCompact V2',\n 'EM_OPEN8' : 'Open8 8-bit RISC',\n 'EM_RL78' : 'Renesas RL78',\n 'EM_VIDEOCORE5' : 'Broadcom VideoCore V',\n 'EM_78KOR' : 'Renesas 78KOR',\n 'EM_56800EX' : 'Freescale 56800EX',\n 'EM_BA1' : 'Beyond BA1',\n 'EM_BA2' : 'Beyond BA2',\n 'EM_XCORE' : 'XMOS xCORE',\n 'EM_MCHP_PIC' : 'Microchip 8-bit PIC',\n 'EM_INTEL205' : 'Reserved by Intel',\n 'EM_INTEL206' : 'Reserved by Intel',\n 'EM_INTEL207' : 'Reserved by Intel',\n 'EM_INTEL208' : 'Reserved by Intel',\n 'EM_INTEL209' : 'Reserved by Intel',\n 'EM_KM32' : 'KM211 KM32 32-bit',\n 'EM_KMX32' : 'KM211 KMX32 32-bit',\n 'EM_KMX16' : 'KM211 KMX16 16-bit',\n 'EM_KMX8' : 'KM211 KMX8 8-bit',\n 'EM_KVARC' : 'KM211 KVARC',\n 'EM_CDP' : 'Paneve CDP',\n 'EM_COGE' : 'Cognitive',\n 'EM_COOL' : 'Bluechip Systems CoolEngine',\n 'EM_NORC' : 'Nanoradio Optimized RISC',\n 'EM_CSR_KALIMBA' : 'CSR Kalimba',\n 'EM_Z80' : 'Zilog Z80',\n 'EM_VISIUM' : 'VISIUMcore',\n 'EM_FT32' : 'FTDI Chip FT32 32-bit RISC',\n 'EM_MOXIE' : 'Moxie',\n 'EM_AMDGPU' : 'AMD GPU',\n 'EM_RISCV' : 'RISC-V'\n }\n\n return architectures.get(self['e_machine'], '<unknown>')",
"def archs(self, _args):\n print('{Style.BRIGHT}Available target architectures are:'\n '{Style.RESET_ALL}'.format(Style=Out_Style))\n for arch in self.ctx.archs:\n print(' {}'.format(arch.arch))",
"def get_supported_architectures():\n\n arch_list = []\n url = \"http://ftp.uk.debian.org/debian/dists/stable/main/\"\n sauce = urllib.request.urlopen(url).read() # create a request, open communication channel for reading html page\n soup = bs.BeautifulSoup(sauce, features= 'html.parser')\n span = soup.find_all('a') # find all <a> ... </a> html tags that contain 'href' with our content indices file\n for fl_span in span:\n if(fl_span.get('href').split('-')[0] == 'Contents' and not fl_span.get('href').split('-')[1].startswith('udeb')): # parse the retrieved contents index file to only get the architecture\n arch_list.append(fl_span.get('href').split('-')[1].split('.')[0]) # append the architecture to list of acceptable architectures\n else:\n continue\n return arch_list",
"def architecture(self):\n return self._arch",
"def OSArchitecture(self) -> Architecture:",
"def get_architecture_str(self):\n raise NotImplementedError",
"def is_supported(self, arch_list: list[str]) -> bool:\n return not set(self.supported).isdisjoint(set(arch_list))",
"def get_package_architecture(self):\n res = self.cli(command=\"show version detail\").response()\n if res:\n search = re.search(r'(64-bit Kernel|Kernel\\s+64-bit)', res, re.I)\n arch = '64' if search else '32'\n self.log(level='INFO', message=\"Kernel architecture: %s\" % arch)\n else:\n arch = '32'\n self.log(level='INFO', message=\"Setting default architecture: %s\" % arch)\n return arch",
"def SupportedPlatforms(self):\n return self.platform_infos.keys()",
"def architecture():\n import platform\n return platform.architecture()[0][:-3]",
"def get_machine_arch(self):\r\n if self['e_machine'] == 'EM_X86_64':\r\n return 'x64'\r\n elif self['e_machine'] in ('EM_386', 'EM_486'):\r\n return 'x86'\r\n elif self['e_machine'] == 'EM_ARM':\r\n return 'ARM'\r\n elif self['e_machine'] == 'EM_AARCH64':\r\n return 'AArch64'\r\n else:\r\n return '<unknown>'",
"def list_supported_algorithms(self):\r\n return list(self._ALGORITHM_TO_VALUE_MAP.keys())",
"def stringForArchitecture(t):\n return {\n Instruction.ARCHITECTURE_UNKNOWN : \"unknown\",\n Instruction.ARCHITECTURE_i386 : \"i386\",\n Instruction.ARCHITECTURE_X86_64 : \"x86_64\",\n Instruction.ARCHITECTURE_ARM : \"ARM\",\n Instruction.ARCHITECTURE_ARM_THUMB : \"ARM (Thumb)\",\n Instruction.ARCHITECTURE_AARCH64 : \"AArch64\",\n Instruction.ARCHITECTURE_OTHER : \"Other (plugin)\"\n }.get(t, \"<unknown>\")",
"def system_cpu_arch(self) -> str:\n machine = platform.machine()\n if machine in [\"AMD64\", \"x86_64\"]:\n return \"x64\"\n else:\n return machine",
"def hardwarearchitecture_list(self, *args, **kwargs):\n ha = record_lib.hardware_architecture_provider()\n ha_updated = Settings.HARDWARE_ARCHITECTURES_UPDATED\n rez = {\n 'header':{\n 'date_updated': ha_updated.strftime('%Y-%m-%dT%H:%M:%S'),\n },\n 'body':{\n 'hardware_architectures': ha,\n }\n }\n return service.setResponseJSON(rez)",
"def _supported_imts(self):\n imt_list = []\n for key in self.imls:\n if \"SA\" in key:\n imt_list.append(imt_module.SA)\n elif key == \"T\":\n continue\n else:\n try:\n imt_val = imt_module.from_string(key)\n except:\n continue\n imt_list.append(imt_val.__class__)\n return imt_list",
"def get_arch(machine_type):\n result = lock.list_locks(machine_type=machine_type, count=1)\n if not result:\n log.warn(\"No machines found with machine_type %s!\", machine_type)\n else:\n return result[0]['arch']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List of compatible runtimes.
|
def compatible_runtimes(self) -> Optional[List[str]]:
return self.project.compatible_runtimes
|
[
"def compatible_runtimes(self) -> Optional[List[str]]:\n if self.META_TAGS[\"compatible_runtimes\"] in self.object_tags:\n return self.object_tags[self.META_TAGS[\"compatible_runtimes\"]].split(\"+\")\n return None",
"async def runtimes(self) -> List[Runtime]:\n runtimes = await self._http_session.get_response(\"get\", \"runtimes/\")\n runtime_list = []\n for runtime in runtimes:\n runtime_list.append(\n Runtime(\n language=runtime.get(\"language\"),\n aliases=runtime.get(\"aliases\"),\n version=runtime.get(\"version\"),\n runtime=runtime.get(\"runtime\")\n )\n )\n self._runtimes = runtime_list\n return runtime_list",
"def get_dotnet_runtimes() -> List[dotnet_const.Runtime]:\n runtimes = []\n for line in check_output([get_exe_name(\"dotnet\"), \"--list-runtimes\"]).decode(\"utf-8\").splitlines():\n name, version, path = line.split(\" \", 2)\n path = join(path[1:-1], version)\n runtimes.append(dotnet_const.Runtime(name=name, version=version, path=path))\n return runtimes",
"def GetSupportedEngines():\r\n pass",
"def _get_supportedProductTypes(self) -> \"std::vector< std::string,std::allocator< std::string > >\" :\n return _core.Application__get_supportedProductTypes(self)",
"def supported_types(self) -> List[BundleType]:",
"def runtime_packages(self):\n return self.packages | self.depends",
"def list_available_drivers():\n return drivers.available_drivers()",
"def available_engines() -> Sequence[\"DiffEngine\"]:\n try:\n return tuple(getattr(DiffEngine, \"_available_engines\"))\n except AttributeError:\n result = []\n try:\n result.append(DiffEngine.create(name=\"native\"))\n except ImportError:\n pass\n result.append(DiffEngine.create(name=\"plain\", hash_optimization=True))\n result.append(DiffEngine.create(name=\"plain\", hash_optimization=False))\n result = tuple(result)\n setattr(DiffEngine, \"_available_engines\", result)\n return result",
"def _get_runtime_supported_types(opset_version: target) -> Set[type]:\n supported_types = {types.fp16, types.fp32, types.int32, types.str, types.bool}\n if opset_version >= target.iOS17:\n supported_types.update({types.int8, types.uint8, types.int16, types.uint16})\n return supported_types",
"def compatible_architectures(self) -> Optional[List[str]]:\n return self.project.compatible_architectures",
"def SupportedPlatforms(self):\n return self.platform_infos.keys()",
"def platform() -> list:\n if GetOS.OS == \"Linux\":\n x = InformationManager(SysFiles.ver.value)\n return x.openF().read().split()[0]\n elif GetOS.OS == \"darwin\":\n x = get_output(\"sw_vers\")\n return x.split()[1:3]",
"def supported_tags(self, force_manylinux=True):\n return _get_supported(\n platform=self.platform,\n impl=self.impl,\n version=self.version,\n abi=self.abi,\n force_manylinux=force_manylinux\n )",
"def get_backend_list():\n lst = [NumpyBackend(), ]\n\n if torch:\n lst.append(TorchBackend())\n\n if jax:\n lst.append(JaxBackend())\n\n return lst",
"def test_determine_runtime():\n runtime = determine_runtime()\n try:\n assert \"docker\" in runtime or \"podman\" in runtime\n except ContainerRuntimeException:\n pass",
"def list_runtimes(self, docker_image_name='all'):\n if docker_image_name == 'default':\n docker_image_name = self._get_default_runtime_image_name()\n runtimes = []\n actions = self.cf_client.list_actions(self.package)\n\n for action in actions:\n action_image_name, memory = self._unformat_action_name(action['name'])\n if docker_image_name == action_image_name or docker_image_name == 'all':\n runtimes.append((action_image_name, memory))\n return runtimes",
"def from_simctl_info(info: List[Dict[str, Any]]) -> List[\"Runtime\"]:\n runtimes = []\n for runtime_info in info:\n runtimes.append(Runtime(runtime_info))\n return runtimes",
"def runtime_info():\n return True, runtime.runtime_info()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
MD5 of the archive file.
|
def md5_checksum(self) -> str:
file_hash = FileHash(hashlib.md5())
file_hash.add_file(self.archive_file)
return base64.b64encode(file_hash.digest).decode()
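A minimal standalone sketch of the same base64-encoded MD5 computation, assuming plain hashlib with chunked reads in place of the project's FileHash helper (names here are illustrative):

import base64
import hashlib
from pathlib import Path

def md5_checksum(archive_file: Path, chunk_size: int = 8192) -> str:
    # hash the file in chunks so large archives do not need to fit in memory
    digest = hashlib.md5()
    with archive_file.open("rb") as stream:
        for chunk in iter(lambda: stream.read(chunk_size), b""):
            digest.update(chunk)
    # base64 of the raw digest (not the hex digest) matches S3's Content-MD5 format
    return base64.b64encode(digest.digest()).decode()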
|
[
"def MD5Sum(klass, filename):\n return hashlib.md5(path(filename).text()).hexdigest()[:8]",
"def file_md5(filename):\r\n file_o = read_file(filename)\r\n file_str = file_o.read()\r\n file_o.close()\r\n return string_md5(file_str)",
"def _get_file_md5sum(file_name):\n hash_obj = hashlib.md5()\n with open(file_name, 'rb') as f:\n hash_obj.update(f.read())\n return hash_obj.hexdigest().encode('utf-8')",
"def checksum_md5 (filename) :\n fname = filename\n block_size = 0x10000\n fd = open(fname, \"rb\")\n try:\n block = [ fd.read(block_size) ]\n while len(block[-1]) > 0 :\n block.append ( fd.read(block_size) )\n contents = block\n zero = hashlib.md5()\n i = 0 \n for el in contents :\n i += 1\n zero.update( el )\n m = zero\n return m.hexdigest()\n finally:\n fd.close()\n return None",
"def calc_md5( path_filename ):\n hash_md5 = hashlib.md5()\n with open( path_filename , \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()",
"def md5sum_file(filename):\n import hashlib\n \n infile = open(filename, 'rb')\n content = infile.read()\n infile.close()\n m = hashlib.md5() \n m.update(content)\n md5 = m.hexdigest() # now the md5 variable contains the MD5 sum\n \n return md5",
"def md5(self, refresh=False):\r\n if refresh or self._md5 is None:\r\n h = hashlib.md5()\r\n with open(self.path, 'rb') as fp:\r\n chunk = fp.read(2048)\r\n while chunk:\r\n h.update(chunk)\r\n chunk = fp.read(2048)\r\n self._md5 = h.hexdigest()\r\n return self._md5",
"def _get_md5sum(self, fpath):\n try:\n current_md5 = hashlib.md5()\n if isinstance(fpath, six.string_types) and os.path.exists(fpath):\n with open(fpath, \"rb\") as fh:\n for chunk in self._read_chunks(fh):\n current_md5.update(chunk)\n\n elif (fpath.__class__.__name__ in [\"StringIO\", \"StringO\"] or\n isinstance(fpath, IOBase)):\n for chunk in self._read_chunks(fpath):\n current_md5.update(chunk)\n else:\n return \"\"\n return current_md5.hexdigest()\n except Exception:\n msg = (\"Failed to calculate the image's md5sum\")\n LOG.error(msg)\n raise exception.SDKImageOperationError(rs=3)",
"def calculate_md5_checksum(filename):\n\n length = io.DEFAULT_BUFFER_SIZE\n md5 = hashlib.md5()\n\n with io.open(filename, mode=\"rb\") as fd:\n for chunk in iter(lambda: fd.read(length), b''):\n md5.update(chunk)\n\n return md5.hexdigest()",
"def calcFileMd5sum(filename): \n\n m = hashlib.md5()\n\n # Read file in as 128 byte chunks\n with open(filename) as f: m.update(f.read(128))\n \n return m.hexdigest()",
"def md5sum():\r\n hashSum = None\r\n try:\r\n # Open as read binary\r\n config = _ConfigFile._open('rb', yaml=False)\r\n\r\n # pipe contents of the file through\r\n hashSum = md5(config).hexdigest()\r\n except:\r\n hashSum = 'none'\r\n\r\n return hashSum",
"def compute_md5_sum(self, resource: GenomicResource, filename: str) -> str:\r\n logger.debug(\r\n \"compute md5sum for %s in %s\", filename, resource.resource_id)\r\n\r\n with self.open_raw_file(resource, filename, \"rb\") as infile:\r\n md5_hash = hashlib.md5()\r\n while chunk := infile.read(self.CHUNK_SIZE):\r\n md5_hash.update(chunk)\r\n return md5_hash.hexdigest()",
"def compute_md5(file):\n md5 = hashlib.md5()\n while True:\n buf = file.read(8192)\n if not buf:\n break\n md5.update(buf)\n return md5",
"def md5_filelike(filelike):\n m = hashlib.md5()\n while True:\n s = filelike.read()\n if len(s) == 0:\n break\n else:\n m.update(s)\n return m.hexdigest()",
"def get_hash_md5(self):\n\n if md5 is not None:\n return md5( self.get_data() ).hexdigest()",
"def svn_fs_file_md5_checksum(*args) -> \"unsigned char [ANY]\":\n return _fs.svn_fs_file_md5_checksum(*args)",
"def filehash(file):\n hasher = hashlib.md5()\n f = open(file, 'rb')\n buf = f.read()\n hasher.update(buf)\n return hasher.hexdigest()",
"def validate_dicom_archive_md5sum(self, tarchive_path):\n\n # compute the md5sum of the tarchive file\n tarchive_file_md5sum = utilities.compute_md5_hash(tarchive_path)\n\n # grep the md5sum stored in the database\n tarchive_db_md5sum = self.tarchive_info_dict['md5sumArchive'].split()[0]\n\n # check that the two md5sum are the same\n result = dict()\n if tarchive_db_md5sum == tarchive_file_md5sum:\n result['success'] = True\n result['message'] = f\"checksum for target: {tarchive_file_md5sum}; \" \\\n f\"checksum from database: {tarchive_db_md5sum}\"\n else:\n result['success'] = False\n result['message'] = \"ERROR: DICOM archive seems corrupted or modified. Upload will exit now.\"\n\n return result",
"def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
S3 object version ID.
|
def object_version_id(self) -> Optional[str]:
if (
not self._put_object_response
or "VersionId" not in self._put_object_response
):
return None
return self._put_object_response["VersionId"]
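A hedged usage sketch of where that response comes from, assuming boto3 and an illustrative bucket name not taken from the project; "VersionId" is only returned when bucket versioning is enabled, which is why the property above falls back to None:

import boto3

s3 = boto3.client("s3")
response = s3.put_object(Bucket="example-bucket", Key="package.zip", Body=b"...")
# absent on unversioned buckets, so treat it as optional
version_id = response.get("VersionId")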
|
[
"def _object_version_id(self, filename):\n attribute_block = self._object_attribute_block(filename)\n return attribute_block.version_id",
"def as_object_version_id(value):\n return value.version_id if isinstance(value, ObjectVersion) else value",
"def get_id(self):\n return self.bucket_id",
"def version(self) -> str:\n return uuid.uuid4().hex[:30] + str(int(time.time()))",
"def ios_version_id(self) -> str:\n return pulumi.get(self, \"ios_version_id\")",
"def get_version_id(self, client: DandiAPIClient) -> str:\n if self.version_id is None:\n r = client.get(f\"/dandisets/{self.dandiset_id}/\")\n version = r[\"most_recent_published_version\"] or r[\"draft_version\"]\n return version[\"version\"]\n else:\n return self.version_id",
"def version(self):\n return self.metadata.version",
"def id(self, version=2):\n if version == 2:\n if self._id is None:\n self._id = self._get_id()\n return self._id\n elif version == 1:\n if self._v1_id is None:\n self._v1_id = self._get_v1_id()\n return self._v1_id\n else:\n raise Pad.FormatVersion(\"unknown format version %d for ID\" % version)",
"def model_version_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"model_version_id\")",
"def getVersion(signature):\r\n return getToken(signature, \"Version: \", ' ')",
"def get_genomic_resource_id_version(self) -> str:\r\n return f\"{self.resource_id}{version_tuple_to_suffix(self.version)}\"",
"def get_metadata_version_name():\n # type: (None) -> str\n return _METADATA_VERSION_NAME",
"def version(self):\n\n return self.__entity[\"version\"]",
"def version(self) -> int:\n return self._number",
"def get_source_s3_key(self, stamp=None):\n stamp = stamp or to_timestamp(self.uploaded_on)\n return f\"{self.pk}/video/{self.pk}/{stamp}\"",
"def _get_upload_storage_broker_version(self):\n sb_version = None\n structure_key = self.get_structure_key()\n if _object_exists(self.s3resource, self.bucket, structure_key):\n structure_parameters_txt = self.get_text(structure_key)\n structure_parameters = json.loads(structure_parameters_txt)\n if \"storage_broker_version\" in structure_parameters:\n sb_version = structure_parameters[\"storage_broker_version\"]\n return sb_version",
"def find_version_attribute(obj):\n if \"modified\" in obj:\n return \"modified\"\n elif \"created\" in obj:\n return \"created\"\n elif \"_date_added\" in obj:\n return \"_date_added\"",
"def get_source_s3_key(self, stamp=None):\n stamp = stamp or to_timestamp(self.uploaded_on)\n return f\"{self.video.pk}/thumbnail/{self.pk}/{stamp}\"",
"def get_object_id(input_image_path):\n input_image_path = input_image_path.replace('--original', '')\n file_name = input_image_path.split('/')[-1]\n object_id = '.'.join(file_name.split('/')[-1].split('.')[:-1])\n return object_id"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Runtime of the deployment package.
|
def runtime(self) -> str:
return self.project.runtime
|
[
"def generate_runtime_container(self):\n for version in self.versions:\n self.display('docker build -f {}/dockerfiles/{}_{}.d -t {} {}'.format(\n self.tmp, self.runtime, version, 'continuous:{}_{}'.format(self.runtime, version), self.tmp), \"yellow\")\n self.exec('docker build -f {}/dockerfiles/{}_{}.d -t {} {}'.format(\n self.tmp, self.runtime, version, 'continuous:{}_{}'.format(self.runtime, version), self.tmp), not self.verbose)",
"def _install_runtime(self, sys_path, dest_path):\n raise NotImplementedError()",
"async def run_runtime(self) -> None:\n self._state.set(RuntimeStates.starting)\n await asyncio.gather(\n self._start_multiplexer(), self._start_agent_loop(), self._start_storage()\n )",
"def _generate_runtime_meta(self, docker_image_name):\n action_code = \"\"\"\n import sys\n import pkgutil\n\n def main(args):\n print(\"Extracting preinstalled Python modules...\")\n runtime_meta = dict()\n mods = list(pkgutil.iter_modules())\n runtime_meta[\"preinstalls\"] = [entry for entry in sorted([[mod, is_pkg] for _, mod, is_pkg in mods])]\n python_version = sys.version_info\n runtime_meta[\"python_ver\"] = str(python_version[0])+\".\"+str(python_version[1])\n print(\"Done!\")\n return runtime_meta\n \"\"\"\n\n runtime_memory = 128\n # old_stdout = sys.stdout\n # sys.stdout = open(os.devnull, 'w')\n action_name = self._format_action_name(docker_image_name, runtime_memory)\n self.cf_client.create_package(self.package)\n self.cf_client.create_action(self.package, action_name, docker_image_name,\n is_binary=False, code=textwrap.dedent(action_code),\n memory=runtime_memory, timeout=30000)\n # sys.stdout = old_stdout\n logger.debug(\"Extracting Python modules list from: {}\".format(docker_image_name))\n\n try:\n retry_invoke = True\n while retry_invoke:\n retry_invoke = False\n runtime_meta = self.cf_client.invoke_with_result(self.package, action_name)\n if 'activationId' in runtime_meta:\n retry_invoke = True\n except Exception:\n raise(\"Unable to invoke 'modules' action\")\n try:\n self.delete_runtime(docker_image_name, runtime_memory)\n except Exception:\n raise Exception(\"Unable to delete 'modules' action\")\n\n if not runtime_meta or 'preinstalls' not in runtime_meta:\n raise Exception(runtime_meta)\n\n return runtime_meta",
"def runtime_handler(self) -> str:\n return pulumi.get(self, \"runtime_handler\")",
"def create_runtime(self, docker_image_name, memory, timeout):\n if docker_image_name == 'default':\n docker_image_name = self._get_default_runtime_image_name()\n\n runtime_meta = self._generate_runtime_meta(docker_image_name)\n\n logger.info('Creating new Lithops runtime based on Docker image {}'.format(docker_image_name))\n\n self.cf_client.create_package(self.package)\n action_name = self._format_action_name(docker_image_name, memory)\n\n create_function_handler_zip(openwhisk_config.FH_ZIP_LOCATION, '__main__.py', __file__)\n\n with open(openwhisk_config.FH_ZIP_LOCATION, \"rb\") as action_zip:\n action_bin = action_zip.read()\n self.cf_client.create_action(self.package, action_name, docker_image_name, code=action_bin,\n memory=memory, is_binary=True, timeout=timeout*1000)\n self._delete_function_handler_zip()\n return runtime_meta",
"def get_runtime() -> RuntimeHandle:\n from bqskit.runtime.worker import get_worker\n return get_worker()",
"async def runtimes(self) -> List[Runtime]:\n runtimes = await self._http_session.get_response(\"get\", \"runtimes/\")\n runtime_list = []\n for runtime in runtimes:\n runtime_list.append(\n Runtime(\n language=runtime.get(\"language\"),\n aliases=runtime.get(\"aliases\"),\n version=runtime.get(\"version\"),\n runtime=runtime.get(\"runtime\")\n )\n )\n self._runtimes = runtime_list\n return runtime_list",
"def runtime_environment(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"runtime_environment\")",
"def _deploy_release():\n require('hosts')\n require('path')\n symlink_current_release()\n install_requirements()\n install_site()\n migrate()\n restart_webserver()",
"def get_runtime_dir(self):\n \n # Get info on cached version\n cur_version, path = self.get_cached_version()\n sys_version, sys_path = self.get_system_version()\n \n # Do we need an install?\n install_action = None\n if not (cur_version or sys_version):\n raise RuntimeError('Dont have cached version of runtime %s nor can '\n 'install it.' % self.get_name())\n elif not cur_version:\n install_action = 'install'\n elif not sys_version:\n # No specific version required, e.g. because can assume that we\n # have an up-to-date version.\n pass\n elif versionstring(cur_version) < versionstring(sys_version):\n install_action = 'update'\n \n # Install if necessary and update version\n if install_action:\n logger.info('Performing %s of runtime %s' %\n (install_action, self.get_name()))\n path = op.join(RUNTIME_DIR, self.get_name() + '_' +sys_version)\n self._install_runtime(sys_path, path)\n \n assert os.path.isdir(path)\n lock_runtime_dir(path)\n return path",
"def get_ios_runtime():\n location = os.path.join(IOS_PATH, \"tns-ios.tgz\")\n shutil.copy2(location.strip(), os.path.join(os.getcwd(), SUT_ROOT_FOLDER, \"tns-ios.tgz\"))\n if File.exists(os.path.join(os.getcwd(), IOS_RUNTIME_PATH)):\n extract_archive(IOS_RUNTIME_PATH, os.path.splitext(IOS_RUNTIME_PATH)[0])",
"def deploy_eis_app():",
"def runtime_environment(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"runtime_environment\")",
"def runtime_config(self) -> 'outputs.RuntimeConfigResponse':\n return pulumi.get(self, \"runtime_config\")",
"def runtime_handler(self) -> Optional[str]:\n return pulumi.get(self, \"runtime_handler\")",
"def deploy():\n test()\n require('hosts', provided_by=servers)\n require('path')\n env.release = time.strftime('%Y-%m-%d-%H-%M')\n upload_tar_from_git()\n install_requirements()\n install_site()\n symlink_current_release()\n migrate()\n collect_static()\n restart_webserver()\n remove_remote_package()",
"def deploy_app():\r\n upload_and_explode_code_bundle()\r\n symlink_current_release()",
"def deploy(self, release, force_deploy=False, rollback_on_failure=True): # noqa\n if release.build is None:\n raise DeisException('No build associated with this release')\n\n # use create to make sure minimum resources are created\n self.create()\n\n # set processes structure to default if app is new.\n if self.structure == {}:\n self.structure = self._default_structure(release)\n self.procfile_structure = self._default_structure(release)\n self.save()\n # reset canonical process types if build type has changed.\n else:\n # find the previous release's build type\n prev_release = release.previous()\n if prev_release and prev_release.build:\n if prev_release.build.type != release.build.type:\n structure = self.structure.copy()\n # zero out canonical pod counts\n for proctype in ['cmd', 'web']:\n if proctype in structure:\n structure[proctype] = 0\n # update with the default process type.\n structure.update(self._default_structure(release))\n self.structure = structure\n # if procfile structure exists then we use it\n if release.build.procfile and \\\n release.build.sha and not \\\n release.build.dockerfile:\n self.procfile_structure = release.build.procfile\n self.save()\n\n # always set the procfile structure for any new release\n if release.build.procfile:\n self.procfile_structure = release.build.procfile\n self.save()\n\n # deploy application to k8s. Also handles initial scaling\n app_settings = self.appsettings_set.latest()\n deploys = {}\n for scale_type, replicas in self.structure.items():\n deploys[scale_type] = self._gather_app_settings(release, app_settings, scale_type, replicas) # noqa\n\n # Sort deploys so routable comes first\n deploys = OrderedDict(sorted(deploys.items(), key=lambda d: d[1].get('routable')))\n\n # Check if any proc type has a Deployment in progress\n self._check_deployment_in_progress(deploys, force_deploy)\n\n # use slugrunner image for app if buildpack app otherwise use normal image\n image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image\n\n try:\n # create the application config in k8s (secret in this case) for all deploy objects\n self.set_application_config(release)\n # only buildpack apps need access to object storage\n if release.build.type == 'buildpack':\n self.create_object_store_secret()\n\n # gather all proc types to be deployed\n tasks = [\n functools.partial(\n self._scheduler.deploy,\n namespace=self.id,\n name=self._get_job_id(scale_type),\n image=image,\n entrypoint=self._get_entrypoint(scale_type),\n command=self._get_command(scale_type),\n **kwargs\n ) for scale_type, kwargs in deploys.items()\n ]\n\n try:\n async_run(tasks)\n except KubeException as e:\n # Don't rollback if the previous release doesn't have a build which means\n # this is the first build and all the previous releases are just config changes.\n if rollback_on_failure and release.previous().build is not None:\n err = 'There was a problem deploying {}. 
Rolling back process types to release {}.'.format('v{}'.format(release.version), \"v{}\".format(release.previous().version)) # noqa\n # This goes in the log before the rollback starts\n self.log(err, logging.ERROR)\n # revert all process types to old release\n self.deploy(release.previous(), force_deploy=True, rollback_on_failure=False)\n # let it bubble up\n raise DeisException('{}\\n{}'.format(err, str(e))) from e\n\n # otherwise just re-raise\n raise\n except Exception as e:\n # This gets shown to the end user\n err = '(app::deploy): {}'.format(e)\n self.log(err, logging.ERROR)\n raise ServiceUnavailable(err) from e\n\n app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None\n # Make sure the application is routable and uses the correct port done after the fact to\n # let initial deploy settle before routing traffic to the application\n if deploys and app_type:\n app_settings = self.appsettings_set.latest()\n if app_settings.whitelist:\n addresses = \",\".join(address for address in app_settings.whitelist)\n else:\n addresses = None\n service_annotations = {\n 'maintenance': app_settings.maintenance,\n 'whitelist': addresses\n }\n\n routable = deploys[app_type].get('routable')\n port = deploys[app_type].get('envs', {}).get('PORT', None)\n self._update_application_service(self.id, app_type, port, routable, service_annotations) # noqa\n\n # Wait until application is available in the router\n # Only run when there is no previous build / release\n old = release.previous()\n if old is None or old.build is None:\n self.verify_application_health(**deploys[app_type])\n\n # cleanup old release objects from kubernetes\n release.cleanup_old()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fix file permissions of the files contained within the archive file. Only needs to ensure that the file is executable. Permissions will be changed to 755 or 644 if needed. The change will occur within the archive file only; the original file will be unchanged. This should be run after all files have been added to the archive file.
|
def _build_fix_file_permissions(self, archive_file: zipfile.ZipFile) -> None:
for file_info in archive_file.filelist:
current_perms = (
file_info.external_attr & self.ZIPFILE_PERMISSION_MASK
) >> 16
required_perm = 0o755 if current_perms & stat.S_IXUSR != 0 else 0o644
if current_perms != required_perm:
LOGGER.debug(
"fixing file permissions for %s: %o => %o",
file_info.filename,
current_perms,
required_perm,
)
file_info.external_attr = (
file_info.external_attr & ~self.ZIPFILE_PERMISSION_MASK
) | (required_perm << 16)
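A self-contained sketch of the same permission fix on a freshly built archive, assuming ZIPFILE_PERMISSION_MASK covers only the rwx bits stored in the high 16 bits of external_attr (the project constant may differ) and using a hypothetical archive name:

import stat
import zipfile

PERMISSION_MASK = 0o777 << 16  # assumption: rwx bits of the Unix mode, shifted into the high word

with zipfile.ZipFile("example.zip", "w") as archive:  # hypothetical archive
    archive.writestr("lambda_function.py", "def handler(event, context): ...\n")
    for info in archive.filelist:
        mode = (info.external_attr & PERMISSION_MASK) >> 16
        wanted = 0o755 if mode & stat.S_IXUSR else 0o644
        if mode != wanted:
            # the central directory is written on close(), so updating filelist here
            # changes the stored permissions without touching the source file
            info.external_attr = (info.external_attr & ~PERMISSION_MASK) | (wanted << 16)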
|
[
"def update_permission(self):\n\n from stat import S_IEXEC\n\n for data in self.files:\n if data not in ['iana', 'dir_structure']:\n stats = stat(self.destination + self.files[data])\n chmod(\n self.destination +\n self.files[data],\n stats.st_mode | S_IEXEC)\n\n return",
"def _fix_permissions(directory):\n os.chmod(directory, 0755)\n for root, dirs, files in os.walk(directory):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0755)\n for f in files:\n os.chmod(os.path.join(root, f), 0644)",
"def _fix_permission(self, path):\n if self._target_uid:\n try:\n os.chown(path, self._target_uid, self._target_gid)\n except OSError as e:\n # No such file (2) is okay\n # Permission denied (13) is okay\n if e.error not in (2, 13):\n raise",
"def __set_perms(self):\n\n if not self.meta_root:\n # Nothing to do.\n return\n\n files = [self._attrs.name]\n files.extend(self._attrs.parts.keys())\n files.extend(self._attrs.updates.keys())\n\n # Force file_mode, so that unprivileged users can read these.\n bad_modes = []\n for name in files:\n pathname = os.path.join(self.meta_root, name)\n try:\n if self.read_only:\n fmode = stat.S_IMODE(os.stat(\n pathname).st_mode)\n if fmode != self.__file_mode:\n bad_modes.append((pathname,\n \"{0:o}\".format(\n self.__file_mode),\n \"{0:o}\".format(fmode)))\n else:\n os.chmod(pathname, self.__file_mode)\n except EnvironmentError as e:\n # If the file doesn't exist yet, move on.\n if e.errno == errno.ENOENT:\n continue\n\n # If the mode change failed for another reason,\n # check to see if we actually needed to change\n # it, and if so, add it to bad_modes.\n fmode = stat.S_IMODE(os.stat(\n pathname).st_mode)\n if fmode != self.__file_mode:\n bad_modes.append((pathname,\n \"{0:o}\".format(self.__file_mode),\n \"{0:o}\".format(fmode)))\n\n if bad_modes:\n raise api_errors.BadCatalogPermissions(bad_modes)",
"def doRollover(self):\n # Rotate the file first.\n super().doRollover()\n\n # Add group write to the current permissions.\n cur_mode = os.stat(self.baseFilename).st_mode\n os.chmod(self.baseFilename, cur_mode | stat.S_IWGRP)",
"def change_permissions(root, folder_permissions, file_permissions):\n assert os.path.isdir(root)\n\n for prefix, folders, files in os.walk(root, topdown=False):\n for file in files:\n os.chmod(os.path.join(prefix, file), file_permissions)\n for folder in folders:\n os.chmod(os.path.join(prefix, folder), folder_permissions)\n os.chmod(root, folder_permissions)",
"def _repackage(self):\n file_suffix = f'_{APP_NAME}{self._file.extension}'\n filename = self._file.name.replace(self._file.extension, file_suffix)\n unlocked_filepath = os.path.join(APP_SAVE_DIR, filename)\n\n filepaths = self._get_file_listing(self._temp_processing_dir)\n with zipfile.ZipFile(unlocked_filepath,'w') as repackaged_zip:\n for filepath in filepaths:\n rel_filepath = filepath.replace(self._temp_processing_dir,'')\n repackaged_zip.write(filepath,arcname=rel_filepath)\n \n print('File repackaged...')",
"def enforce_file_permissions(path):\n # source: dcos-cli\n\n if not os.path.isfile(path):\n raise Exception(\"Path [{}] is not a file\".format(path))\n\n permissions = oct(stat.S_IMODE(os.stat(path).st_mode))\n if permissions not in [\"0o600\", \"0600\", \"0o400\", \"0400\"]:\n if os.path.realpath(path) != path:\n path = \"%s (pointed to by %s)\" % (os.path.realpath(path), path)\n msg = (\n \"Permissions '{}' for configuration file '{}' are too open. \"\n \"File must only be accessible by owner. \"\n \"Aborting...\".format(permissions, path)\n )\n raise Exception(msg)",
"def fix_permissions(prefix):\n utils.run_subprocess(\n [\"chown\", \"-R\", \"{}:{}\".format(os.getuid(), os.getgid()), prefix]\n )\n utils.run_subprocess([\"chmod\", \"-R\", \"o-w\", prefix])",
"def test_safe_copy_removes_exec_perms(self):\n pass",
"def initialEnforce(path, user, group, filePermissions,\n dirPermissions, enforceRoot = True):\n \n for root, dirs, files in os.walk(path):\n if enforceRoot:\n setPermissionsForFile(root, user, group, filePermissions, dirPermissions)\n else:\n for d in dirs:\n setPermissionsForFile(os.path.join(root, d), user, group, filePermissions, dirPermissions)\n\n for f in files:\n setPermissionsForFile(os.path.join(root, f), user, group, filePermissions, dirPermissions)",
"def restor_perms(local_file, headers):\r\n\r\n # Restore Permissions.\r\n os.chmod(\r\n local_file,\r\n int(headers['x-object-meta-perms'], 8)\r\n )\r\n\r\n # Lookup user and group name and restore them.\r\n os.chown(\r\n local_file,\r\n pwd.getpwnam(headers['x-object-meta-owner']).pw_uid,\r\n grp.getgrnam(headers['x-object-meta-group']).gr_gid\r\n )",
"def jboss_fix_privileges(self):\n usr = self.jboss.get_user()\n self.sysconfig.exec_shell('sudo chown -R %s:%s %s' % (usr, usr, self.get_ejbca_home()))\n self.jboss.fix_privileges()",
"def update_ejbca_from_file(self, archive_path, basedir):\n cmd = 'sudo tar -xzf %s' % archive_path\n ret, out, err = self.sysconfig.cli_cmd_sync(cmd, write_dots=True, cwd=basedir)\n if ret != 0:\n raise errors.SetupError('Could not extract update archive')\n\n folders = [f for f in os.listdir(basedir) if not os.path.isfile(os.path.join(basedir, f))\n and f != '.' and f != '..']\n\n if len(folders) != 1:\n raise errors.SetupError('Invalid folder structure after update extraction')\n\n archive_dir = os.path.join(basedir, folders[0])\n if not os.path.exists(archive_dir):\n raise errors.SetupError('Directory with ejbca not found in the update archive: %s' % archive_dir)\n if not os.path.exists(os.path.join(archive_dir, 'build.xml')):\n raise errors.SetupError('Invalid update archive, build.xml not found in %s' % archive_dir)\n\n archive_slash = util.add_ending_slash(archive_dir)\n dest_slash = util.add_ending_slash(self.get_ejbca_home())\n\n excludes = ''\n if self.doing_reinstall:\n excludes = ' '.join(['--exclude %s' % util.escape_shell(util.add_ending_slash(x))\n for x in self.EXCLUDE_REINSTALL])\n\n cmd = 'sudo rsync -av --delete %s %s %s' \\\n % (excludes, util.escape_shell(archive_slash), util.escape_shell(dest_slash))\n ret, out, err = self.sysconfig.cli_cmd_sync(cmd, write_dots=True, cwd=basedir)\n if ret != 0:\n raise errors.SetupError('EJBCA sync failed')\n\n self.jboss_fix_privileges()",
"def _update(self, remove=[]):\n nzfp = os.path.join(\n tempfile.gettempdir(), \"tempzip_{t}.zip\".format(t=time.time())\n )\n op = self.zf.fp.name\n pswd = self.zf.pwd\n comment = self.zf.comment\n nzf = zipfile.ZipFile(nzfp, \"w\", self.zf.compression, True)\n infos = self.zf.infolist()\n for zipinfo in infos:\n add = True\n for rm in remove:\n if zipinfo.filename.startswith(rm):\n add = False\n break\n if not add:\n continue\n ofo = self.zf.open(zipinfo)\n nzf.writestr(zipinfo, ofo.read())\n self.zf.close()\n os.remove(op)\n nzf.close()\n shutil.copy(nzfp, op)\n self.zf = zipfile.ZipFile(op, \"a\", zipfile.ZIP_DEFLATED, True)\n self.zf.setpassword(pswd)\n self.zf.comment = comment",
"def test_modify_files(self):\n logger.info(\"Modify files by write a+(extend size:1k)\")\n for file_path in self.Files:\n md5 = self.create_file(file_path, \"1K\", 128, 'a+')\n self.Md5Csum[file_path] = md5\n return True",
"def test_logging_when_unable_to_add_directory(self):\n simple_path = self.get_data(\"simple\")\n file_path = os.path.join(simple_path, \"file.txt\")\n original = os.stat(file_path)[ST_MODE]\n\n # Change permissions to none\n os.chmod(file_path, 0)\n\n with storelet.ZipBackup(\"test\") as b:\n b.include_directory(simple_path)\n self.assertLogged(\"warning\", \"Could not add file\")\n\n # Put the permissions back\n os.chmod(file_path, original)",
"def recursive_chmod(base_path, permissions):\r\n if path.isfile(base_path):\r\n chmod(base_path, permissions)\r\n else:\r\n for dir_path, _, file_names in walk(base_path):\r\n chmod(dir_path, permissions)\r\n for file in file_names:\r\n chmod(path.join(dir_path, file), permissions)",
"def setperms(self,filename,chmod,chgrp,chown):\n \n #chmod\n if chmod:\n perm=int(chmod,8)\n try:\n os.chmod(filename, perm)\n except:\n self.logger.error('could not set permission on file %s'%filename)\n\n #chgrp\n changetogroup=-1\n if chgrp:\n group=None\n try:\n group=grp.getgrnam(chgrp)\n except KeyError:\n pass\n \n try:\n group=grp.getgrgid(int(chgrp))\n except KeyError:\n pass\n except ValueError:\n pass\n \n if group!=None:\n changetogroup=group.gr_gid\n else:\n self.logger.warn(\"Group %s not found\"%chgrp)\n\n #chown\n changetouser=-1\n if chown:\n user=None\n try:\n user=pwd.getpwnam(chown)\n except KeyError:\n pass\n \n try:\n user=pwd.getpwuid(int(chown))\n except KeyError:\n pass\n except ValueError:\n pass\n \n if user!=None:\n changetouser=user.pw_uid\n else:\n self.logger.warn(\"User %s not found\"%chown)\n \n if changetogroup!=-1 or changetouser!=-1:\n try:\n os.chown(filename, changetouser, changetogroup)\n except Exception,e:\n self.logger.error(\"Could not change user/group of file %s : %s\"%(filename,str(e)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handle installing & zipping dependencies.
|
def _build_zip_dependencies(
self,
archive_file: zipfile.ZipFile,
) -> None:
self.project.install_dependencies()
for dep in self.iterate_dependency_directory():
archive_file.write(
dep,
self.insert_layer_dir(
dep, self.project.dependency_directory
).relative_to(self.project.dependency_directory)
if self.usage_type == "layer"
else dep.relative_to(self.project.dependency_directory),
)
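A rough sketch of the layer-versus-function archive layout this produces, assuming the AWS Python layer convention of nesting packages under a python/ prefix (insert_layer_dir is approximated by prepending that directory; paths are hypothetical):

from pathlib import Path
import zipfile

dependency_directory = Path(".venv/lib/python3.9/site-packages")  # hypothetical
usage_type = "layer"

with zipfile.ZipFile("dependencies.zip", "w") as archive:
    for dep in sorted(p for p in dependency_directory.rglob("*") if p.is_file()):
        relative = dep.relative_to(dependency_directory)
        # layers are extracted under /opt, so Python packages must live in python/
        arcname = Path("python") / relative if usage_type == "layer" else relative
        archive.write(dep, arcname)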
|
[
"def _install_dependencies(self):\n for package in self._dependencies:\n print('installing dependency %s...' % package)\n process_args = [\n self.__python, '-m', 'pip', 'install', '--upgrade', package\n ]\n subprocess.Popen(process_args, shell=False).wait()",
"def create_dependency_layer():\n\n # file paths\n requirements_file_path = \"requirements.txt\"\n target_directory = \"python\"\n zip_file_path = \"dependency-layer.zip\"\n\n # change directory so that relative paths work\n cwd = os.getcwd()\n os.chdir(\"lambda\")\n\n # create new dependency zip only if it doesn't exist\n if not os.path.isfile(zip_file_path):\n\n pip_main(\n [\n \"install\",\n \"-r\",\n requirements_file_path,\n \"--target\",\n target_directory,\n ]\n )\n\n # package dependencies as a zip file\n dep_zip = zipfile.ZipFile(zip_file_path, \"w\", zipfile.ZIP_DEFLATED)\n\n for root, dirs, files in os.walk(target_directory):\n for file in files:\n dep_zip.write(os.path.join(root, file))\n\n dep_zip.close()\n\n # change directory back\n os.chdir(cwd)",
"def install_dependencies():\n local('pip install --upgrade setuptools pip')",
"def installpackage():\n buildpackage()\n install_package_command = ['pip', 'install', os.path.realpath('./dist/hey_helper-0.0.4-py3-none-any.whl')]\n _run_command(install_package_command)",
"def prereposetup_hook(conduit):\n global downloader\n opts, args = conduit.getCmdLine()\n if args[0] == \"install\":\n\n yum_vars = dict(awsdomain=\"amazonaws.com\", awsregion=\"default\", releasever=\"2017.12\")\n yum_vars.update(conduit.getConf().yumvar)\n\n catalog_url = os.environ.get(\"CATALOGURL\", \"http://amazonlinux.{awsregion}.{awsdomain}/{releasever}/extras-catalog.json\").format(**yum_vars)\n downloader = threading.Thread(target=download, args=(catalog_url,))\n downloader.start()",
"def install_files(self):\n install = self.reinitialize_command('install', reinit_subcommands=1)\n install.root = self.bdist_dir\n install.install_lib = self.install_lib\n install.install_data = self.install_data\n install.warn_dir = 0\n # Compiling is already done in build-step, no need to recompile. This\n # doesn't even work with Python 3 if source files contain Python 2\n # spesific syntax\n install.compile = 0\n # No need for egg metadata and executable scripts in wotmod package\n install.sub_commands = [cmd for cmd in install.sub_commands if cmd[0] != 'install_egg_info']\n install.sub_commands = [cmd for cmd in install.sub_commands if cmd[0] != 'install_scripts']\n log.info(\"installing to %s\" % self.bdist_dir)\n self.run_command('install')",
"def dependencies(default_env='dev'):\n pip_install(default_env)\n npm_install()\n bower_install()\n nltk_init()",
"def post_setup(self, context):\n os.environ[\"VIRTUAL_ENV\"] = context.env_dir\n # if not self.nodist:\n # self.install_setuptools(context)\n # Can't install pip without setuptools\n if not self.nopip and not self.nodist:\n self.install_pip(context)",
"def install():\n\n # Add archzfs repository\n config_file = '/etc/pacman.conf'\n append(config_file, '[demz-repo-core]', use_sudo=True)\n append(config_file, 'Server = http://demizerone.com/$repo/$arch', use_sudo=True)\n\n # Add key\n sudo('pacman-key -r 0EE7A126')\n sudo('pacman-key --lsign-key 0EE7A126')\n\n # Update the package database\n arch.update_index()\n\n # Install package\n require.arch.package('archzfs')\n\n # Synchronize user\n dotfiles.sync('fabrecipes/zfs/user/', '$HOME/')\n dotfiles.sync('fabrecipes/zfs/sys/', '/', use_sudo='true')",
"def ziplib(self):\n temp_lib_dynload = self.prefix_lib / \"lib-dynload\"\n temp_os_py = self.prefix_lib / \"os.py\"\n\n self.remove(self.site_packages)\n self.lib_dynload.rename(temp_lib_dynload)\n self.copyfile(self.python_lib / \"os.py\", temp_os_py)\n\n zip_path = self.prefix_lib / f\"python{self.ver_nodot}\"\n shutil.make_archive(str(zip_path), \"zip\", str(self.python_lib))\n\n self.remove(self.python_lib)\n self.python_lib.mkdir()\n temp_lib_dynload.rename(self.lib_dynload)\n temp_os_py.rename(self.python_lib / \"os.py\")\n self.site_packages.mkdir()",
"def install_check():\n reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0] for r in reqs.split()]\n\n not_installed_packages = []\n if 'colorama' not in installed_packages:\n not_installed_packages.append('colorama')\n if 'scapy' not in installed_packages:\n not_installed_packages.append('scapy')\n\n if len(not_installed_packages) != 0:\n installer(not_installed_packages)",
"def requirements():\n\n run('pip install -r {req}'.format(req=REQUIREMENTS))",
"def install(self):\n with use_dir(self.package_dir):\n self._call_hook('pre_install')\n\n for link in self.links:\n link.install(force=self.force)\n\n for sub_package in self.sub_packages:\n sub_package.install()\n\n self._call_hook('post_install')",
"def Install( lib_name,\r\n pip_arg=None,\r\n output_stream=sys.stdout,\r\n ):\r\n\r\n pip_args = pip_arg; del pip_arg\r\n\r\n repo_root = os.getenv(RepositoryBootstrapConstants.DE_REPO_ROOT_NAME)\r\n\r\n scm = GetSCM(repo_root, raise_on_error=False)\r\n if not scm:\r\n output_stream.write(\"ERROR: No SCM is active for '{}'.\\n\".format(repo_root))\r\n return -1\r\n\r\n if scm.HasWorkingChanges(repo_root) or scm.HasUntrackedWorkingChanges(repo_root):\r\n output_stream.write(\"ERROR: Changes were detected in '{}'; please revert/shelve these changes and run this script again.\\n\".format(repo_root))\r\n return -1\r\n\r\n with StreamDecorator(output_stream).DoneManager( line_prefix='',\r\n prefix=\"\\nResults: \",\r\n suffix='\\n',\r\n ) as dm:\r\n pip_command_line = 'pip install \"{}\"{}'.format( lib_name,\r\n '' if not pip_args else \" {}\".format(' '.join(pip_args)),\r\n )\r\n\r\n dm.stream.write(\"\\nDetecting libraries...\")\r\n with dm.stream.DoneManager( suffix='\\n',\r\n ) as this_dm:\r\n libraries = []\r\n\r\n # ----------------------------------------------------------------------\r\n def OnOutput(line):\r\n this_dm.stream.write(line)\r\n\r\n if not line.startswith(\"Installing collected packages: \"):\r\n return True\r\n\r\n line = line[len(\"Installing collected packages: \"):]\r\n\r\n for library in line.split(','):\r\n library = library.strip()\r\n if library:\r\n libraries.append(library)\r\n\r\n return False\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n this_dm.result = Process.Execute( pip_command_line,\r\n OnOutput,\r\n line_delimited_output=True,\r\n )\r\n\r\n if libraries:\r\n this_dm.result = 0\r\n\r\n if this_dm.result != 0:\r\n return this_dm.result\r\n\r\n if not libraries:\r\n return dm.result\r\n\r\n dm.stream.write(\"Reverting local changes...\")\r\n with dm.stream.DoneManager( suffix='\\n',\r\n ) as this_dm:\r\n this_dm.result = scm.Clean(repo_root, no_prompt=True)[0]\r\n\r\n if this_dm.result != 0:\r\n return this_dm.result\r\n\r\n dm.stream.write(\"Reverting existing libraries...\")\r\n with dm.stream.DoneManager( suffix='\\n',\r\n ) as this_dm:\r\n python_lib_dir = os.path.join( os.getenv(RepositoryBootstrapConstants.DE_REPO_GENERATED_NAME),\r\n PythonActivationActivity.Name,\r\n _EnvironmentSettings().LibraryDir,\r\n )\r\n assert os.path.isdir(python_lib_dir), python_lib_dir\r\n\r\n library_items = {}\r\n\r\n for name in os.listdir(python_lib_dir):\r\n fullpath = os.path.join(python_lib_dir, name)\r\n\r\n if not os.path.isdir(fullpath):\r\n continue\r\n\r\n library_items[name.lower()] = CurrentShell.IsSymLink(fullpath)\r\n\r\n # ----------------------------------------------------------------------\r\n def RemoveItem(name):\r\n name_lower = name.lower()\r\n\r\n if library_items[name_lower]:\r\n this_dm.stream.write(\"Removing '{}' for upgrade.\\n\".format(name))\r\n os.remove(os.path.join(python_lib_dir, name))\r\n else:\r\n this_dm.stream.write(\"Removing temporary '{}'.\\n\".format(name))\r\n FileSystem.RemoveTree(os.path.join(python_lib_dir, name))\r\n\r\n del library_items[name_lower]\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n for library in libraries:\r\n potential_library_names = [ library, ]\r\n\r\n # Sometimes, a library's name will begin with a 'Py' but be saved in\r\n # the file system without the 'Py' prefix. 
Account for that scenario.\r\n if library.lower().startswith(\"py\"):\r\n potential_library_names.append(library[len(\"py\"):])\r\n\r\n # Replace dashes with underscores\r\n underscored_library_name = potential_library_names[0].replace(\"-\", \"_\")\r\n if underscored_library_name != potential_library_names[0]:\r\n potential_library_names.append(underscored_library_name)\r\n\r\n for potential_library_name in potential_library_names:\r\n potential_library_name_lower = potential_library_name.lower()\r\n\r\n if potential_library_name_lower not in library_items:\r\n continue\r\n\r\n RemoveItem(potential_library_name)\r\n\r\n # Is there dist- or egg-info as well?\r\n info_items = []\r\n\r\n for item in six.iterkeys(library_items):\r\n if ( item.startswith(potential_library_name_lower) and\r\n (item.endswith(\".dist-info\") or item.endswith(\".egg-info\"))\r\n ):\r\n info_items.append(item)\r\n\r\n for info_item in info_items:\r\n RemoveItem(info_item)\r\n\r\n break\r\n\r\n dm.stream.write(\"Installing...\")\r\n with dm.stream.DoneManager() as this_dm:\r\n this_dm.result = Process.Execute(pip_command_line, this_dm.stream)\r\n\r\n return dm.result",
"def install_additional_packages():\n if INSTALLER == 'APT':\n os.system('apt-get -y update && apt-get -y install wget apache2 git \\\n && service apache2 start')\n elif INSTALLER == 'YUM':\n os.system('yum update -y && yum install -y wget httpd git \\\n && service httpd start')\n elif INSTALLER == 'ZYPPER':\n os.system('zypper update -y && zypper install -y wget httpd git \\\n && service apache2 start')",
"def deploy_pypi():\n test()\n register_pypi()\n deploy_src()\n deploy_eggs()",
"def create_lambda_zip(self, prefix='lambda_package', handler_file=None,\n minify=True, exclude=None, use_precompiled_packages=True, include=None, venv=None):\n import pip\n\n print(\"Packaging project as zip...\")\n\n if not venv:\n if 'VIRTUAL_ENV' in os.environ:\n venv = os.environ['VIRTUAL_ENV']\n elif os.path.exists('.python-version'): # pragma: no cover\n logger.debug(\"Pyenv's local virtualenv detected.\")\n try:\n subprocess.check_output('pyenv', stderr=subprocess.STDOUT)\n except OSError:\n print(\"This directory seems to have pyenv's local venv\"\n \"but pyenv executable was not found.\")\n with open('.python-version', 'r') as f:\n env_name = f.read()[:-1]\n logger.debug('env name = {}'.format(env_name))\n bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')\n venv = bin_path[:bin_path.rfind(env_name)] + env_name\n logger.debug('env path = {}'.format(venv))\n else: # pragma: no cover\n print(\"Zappa requires an active virtual environment.\")\n quit()\n\n cwd = os.getcwd()\n zip_fname = prefix + '-' + str(int(time.time())) + '.zip'\n zip_path = os.path.join(cwd, zip_fname)\n\n # Files that should be excluded from the zip\n if exclude is None:\n exclude = list()\n\n # Exclude the zip itself\n exclude.append(zip_path)\n\n def splitpath(path):\n parts = []\n (path, tail) = os.path.split(path)\n while path and tail:\n parts.append(tail)\n (path, tail) = os.path.split(path)\n parts.append(os.path.join(path, tail))\n return map(os.path.normpath, parts)[::-1]\n split_venv = splitpath(venv)\n split_cwd = splitpath(cwd)\n\n # Ideally this should be avoided automatically,\n # but this serves as an okay stop-gap measure.\n if split_venv[-1] == split_cwd[-1]: # pragma: no cover\n print(\n \"Warning! Your project and virtualenv have the same name! 
You may want \"\n \"to re-create your venv with a new name, or explicitly define a \"\n \"'project_name', as this may cause errors.\"\n )\n\n # First, do the project..\n temp_project_path = os.path.join(tempfile.gettempdir(), str(int(time.time())))\n\n if minify:\n excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]\n copytree(cwd, temp_project_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(cwd, temp_project_path, symlinks=False)\n\n # Then, do the site-packages..\n temp_package_path = os.path.join(tempfile.gettempdir(), str(int(time.time() + 1)))\n if os.sys.platform == 'win32':\n site_packages = os.path.join(venv, 'Lib', 'site-packages')\n else:\n site_packages = os.path.join(venv, 'lib', 'python2.7', 'site-packages')\n if minify:\n excludes = ZIP_EXCLUDES + exclude\n copytree(site_packages, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(site_packages, temp_package_path, symlinks=False)\n\n # We may have 64-bin specific packages too.\n site_packages_64 = os.path.join(venv, 'lib64', 'python2.7', 'site-packages')\n if os.path.exists(site_packages_64):\n if minify:\n excludes = ZIP_EXCLUDES + exclude\n copytree(site_packages_64, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(site_packages_64, temp_package_path, symlinks=False)\n\n copy_tree(temp_package_path, temp_project_path, update=True)\n\n # Then the pre-compiled packages..\n if use_precompiled_packages:\n installed_packages_name_set = {package.project_name.lower() for package in\n pip.get_installed_distributions()}\n\n for name, details in lambda_packages.items():\n if name.lower() in installed_packages_name_set:\n tar = tarfile.open(details['path'], mode=\"r:gz\")\n for member in tar.getmembers():\n # If we can, trash the local version.\n if member.isdir():\n shutil.rmtree(os.path.join(temp_project_path, member.name), ignore_errors=True)\n continue\n\n tar.extract(member, temp_project_path)\n\n # If a handler_file is supplied, copy that to the root of the package,\n # because that's where AWS Lambda looks for it. 
It can't be inside a package.\n if handler_file:\n filename = handler_file.split(os.sep)[-1]\n shutil.copy(handler_file, os.path.join(temp_project_path, filename))\n\n # Then zip it all up..\n try:\n # import zlib\n compression_method = zipfile.ZIP_DEFLATED\n except ImportError: # pragma: no cover\n compression_method = zipfile.ZIP_STORED\n\n zipf = zipfile.ZipFile(zip_path, 'w', compression_method)\n for root, dirs, files in os.walk(temp_project_path):\n\n for filename in files:\n\n # If there is a .pyc file in this package,\n # we can skip the python source code as we'll just\n # use the compiled bytecode anyway..\n if filename[-3:] == '.py':\n abs_filname = os.path.join(root, filename)\n abs_pyc_filename = abs_filname + 'c'\n if os.path.isfile(abs_pyc_filename):\n\n # but only if the pyc is older than the py,\n # otherwise we'll deploy outdated code!\n py_time = os.stat(abs_filname).st_mtime\n pyc_time = os.stat(abs_pyc_filename).st_mtime\n\n if pyc_time > py_time:\n continue\n\n zipf.write(os.path.join(root, filename), os.path.join(root.replace(temp_project_path, ''), filename))\n\n if '__init__.py' not in files:\n tmp_init = os.path.join(temp_project_path, '__init__.py')\n open(tmp_init, 'a').close()\n zipf.write(tmp_init, os.path.join(root.replace(temp_project_path, ''), os.path.join(root.replace(temp_project_path, ''), '__init__.py')))\n\n # And, we're done!\n zipf.close()\n\n # Trash the temp directory\n shutil.rmtree(temp_project_path)\n shutil.rmtree(temp_package_path)\n\n # Warn if this is too large for Lambda.\n file_stats = os.stat(zip_path)\n if file_stats.st_size > 52428800: # pragma: no cover\n print(\"\\n\\nWarning: Application zip package is likely to be too large for AWS Lambda.\\n\\n\")\n\n return zip_fname",
"def fetch_pkg_dependencies(config, pkg_name):\n requirements = load_requirements(config)\n f5_reqs, other_reqs = categorize_requirements(requirements)\n\n # Copy pkg package to /tmp\n print(\"Copying package to /tmp install directory\")\n try:\n tmp_pkg_name = \"/tmp/\" + os.path.basename(pkg_name)\n shutil.copyfile(pkg_name, tmp_pkg_name)\n except Exception as error:\n print(\"Failed\")\n return InstallError(str(error), pkg_name, tmp_pkg_name,\n frame=gfi(cf()),\n errnum=errno.EIO,\n msg=\"Failed to copy f5-sdk package!\")\n print(\"Success\")\n\n print(\"Compare structured pkg dependencies against what was built\")\n try:\n reqs_from_pkg = read_pkg_reqs(tmp_pkg_name)\n compare_reqs(reqs_from_pkg, requirements)\n except InstallError as error:\n print(\"Failed\")\n return error\n\n # handle dependency installation:\n print(\"Installing Dependencies:\")\n try:\n handle_f5_dependencies(f5_reqs)\n handle_other_dependencies(other_reqs)\n except InstallError as error:\n print(\"Failed\")\n return error\n\n print(\"Installing Self - %s\" % pkg_name)\n try:\n output, result = runCommand('rpm -i %s 2>&1' % tmp_pkg_name)\n if not result == 0:\n raise InstallError(\"Exit status was {}\".format(result))\n except InstallError as error:\n print(\"Failed to get requirements for %s.\" % (pkg_name))\n return error\n print(\"Success\")",
"def install_PyPI_packages(checkout):\n libs_path = checkout / \"pythonFiles\" / \"lib\" / \"python\"\n requirements_path = checkout / \"requirements.txt\"\n cmd = [\n sys.executable,\n \"-m\",\n \"pip\",\n \"-q\",\n \"--disable-pip-version-check\",\n \"install\",\n \"--target\",\n os.fspath(libs_path),\n \"--no-cache-dir\",\n \"--implementation\",\n \"py\",\n \"--no-deps\",\n \"--upgrade\",\n \"-r\",\n os.fspath(requirements_path),\n ]\n run_command(cmd)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handle zipping the project source code.
|
def _build_zip_source_code(self, archive_file: zipfile.ZipFile) -> None:
for src_file in self.project.source_code:
archive_file.write(
src_file,
self.insert_layer_dir(
src_file, self.project.source_code.root_directory
).relative_to(self.project.source_code.root_directory)
if self.usage_type == "layer"
else src_file.relative_to(self.project.source_code.root_directory),
)
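
For reference, a minimal standalone sketch of the same idea — zipping a source tree with relative archive names and an optional layer prefix — assuming a plain directory and a hypothetical "python/" prefix rather than the project's own classes:

import zipfile
from pathlib import Path

def zip_directory(src_dir: Path, zip_path: Path, layer_prefix: str = "") -> None:
    """Write every file under src_dir into zip_path with relative arcnames."""
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as archive:
        for src_file in sorted(src_dir.rglob("*")):
            if src_file.is_dir():
                continue
            arcname = src_file.relative_to(src_dir)
            if layer_prefix:  # e.g. "python" when packaging a Lambda layer
                arcname = Path(layer_prefix) / arcname
            archive.write(src_file, arcname)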
|
[
"def create_lambda_zip(self, prefix='lambda_package', handler_file=None,\n minify=True, exclude=None, use_precompiled_packages=True, include=None, venv=None):\n import pip\n\n print(\"Packaging project as zip...\")\n\n if not venv:\n if 'VIRTUAL_ENV' in os.environ:\n venv = os.environ['VIRTUAL_ENV']\n elif os.path.exists('.python-version'): # pragma: no cover\n logger.debug(\"Pyenv's local virtualenv detected.\")\n try:\n subprocess.check_output('pyenv', stderr=subprocess.STDOUT)\n except OSError:\n print(\"This directory seems to have pyenv's local venv\"\n \"but pyenv executable was not found.\")\n with open('.python-version', 'r') as f:\n env_name = f.read()[:-1]\n logger.debug('env name = {}'.format(env_name))\n bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')\n venv = bin_path[:bin_path.rfind(env_name)] + env_name\n logger.debug('env path = {}'.format(venv))\n else: # pragma: no cover\n print(\"Zappa requires an active virtual environment.\")\n quit()\n\n cwd = os.getcwd()\n zip_fname = prefix + '-' + str(int(time.time())) + '.zip'\n zip_path = os.path.join(cwd, zip_fname)\n\n # Files that should be excluded from the zip\n if exclude is None:\n exclude = list()\n\n # Exclude the zip itself\n exclude.append(zip_path)\n\n def splitpath(path):\n parts = []\n (path, tail) = os.path.split(path)\n while path and tail:\n parts.append(tail)\n (path, tail) = os.path.split(path)\n parts.append(os.path.join(path, tail))\n return map(os.path.normpath, parts)[::-1]\n split_venv = splitpath(venv)\n split_cwd = splitpath(cwd)\n\n # Ideally this should be avoided automatically,\n # but this serves as an okay stop-gap measure.\n if split_venv[-1] == split_cwd[-1]: # pragma: no cover\n print(\n \"Warning! Your project and virtualenv have the same name! 
You may want \"\n \"to re-create your venv with a new name, or explicitly define a \"\n \"'project_name', as this may cause errors.\"\n )\n\n # First, do the project..\n temp_project_path = os.path.join(tempfile.gettempdir(), str(int(time.time())))\n\n if minify:\n excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]\n copytree(cwd, temp_project_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(cwd, temp_project_path, symlinks=False)\n\n # Then, do the site-packages..\n temp_package_path = os.path.join(tempfile.gettempdir(), str(int(time.time() + 1)))\n if os.sys.platform == 'win32':\n site_packages = os.path.join(venv, 'Lib', 'site-packages')\n else:\n site_packages = os.path.join(venv, 'lib', 'python2.7', 'site-packages')\n if minify:\n excludes = ZIP_EXCLUDES + exclude\n copytree(site_packages, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(site_packages, temp_package_path, symlinks=False)\n\n # We may have 64-bin specific packages too.\n site_packages_64 = os.path.join(venv, 'lib64', 'python2.7', 'site-packages')\n if os.path.exists(site_packages_64):\n if minify:\n excludes = ZIP_EXCLUDES + exclude\n copytree(site_packages_64, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(site_packages_64, temp_package_path, symlinks=False)\n\n copy_tree(temp_package_path, temp_project_path, update=True)\n\n # Then the pre-compiled packages..\n if use_precompiled_packages:\n installed_packages_name_set = {package.project_name.lower() for package in\n pip.get_installed_distributions()}\n\n for name, details in lambda_packages.items():\n if name.lower() in installed_packages_name_set:\n tar = tarfile.open(details['path'], mode=\"r:gz\")\n for member in tar.getmembers():\n # If we can, trash the local version.\n if member.isdir():\n shutil.rmtree(os.path.join(temp_project_path, member.name), ignore_errors=True)\n continue\n\n tar.extract(member, temp_project_path)\n\n # If a handler_file is supplied, copy that to the root of the package,\n # because that's where AWS Lambda looks for it. 
It can't be inside a package.\n if handler_file:\n filename = handler_file.split(os.sep)[-1]\n shutil.copy(handler_file, os.path.join(temp_project_path, filename))\n\n # Then zip it all up..\n try:\n # import zlib\n compression_method = zipfile.ZIP_DEFLATED\n except ImportError: # pragma: no cover\n compression_method = zipfile.ZIP_STORED\n\n zipf = zipfile.ZipFile(zip_path, 'w', compression_method)\n for root, dirs, files in os.walk(temp_project_path):\n\n for filename in files:\n\n # If there is a .pyc file in this package,\n # we can skip the python source code as we'll just\n # use the compiled bytecode anyway..\n if filename[-3:] == '.py':\n abs_filname = os.path.join(root, filename)\n abs_pyc_filename = abs_filname + 'c'\n if os.path.isfile(abs_pyc_filename):\n\n # but only if the pyc is older than the py,\n # otherwise we'll deploy outdated code!\n py_time = os.stat(abs_filname).st_mtime\n pyc_time = os.stat(abs_pyc_filename).st_mtime\n\n if pyc_time > py_time:\n continue\n\n zipf.write(os.path.join(root, filename), os.path.join(root.replace(temp_project_path, ''), filename))\n\n if '__init__.py' not in files:\n tmp_init = os.path.join(temp_project_path, '__init__.py')\n open(tmp_init, 'a').close()\n zipf.write(tmp_init, os.path.join(root.replace(temp_project_path, ''), os.path.join(root.replace(temp_project_path, ''), '__init__.py')))\n\n # And, we're done!\n zipf.close()\n\n # Trash the temp directory\n shutil.rmtree(temp_project_path)\n shutil.rmtree(temp_package_path)\n\n # Warn if this is too large for Lambda.\n file_stats = os.stat(zip_path)\n if file_stats.st_size > 52428800: # pragma: no cover\n print(\"\\n\\nWarning: Application zip package is likely to be too large for AWS Lambda.\\n\\n\")\n\n return zip_fname",
"def zipfile(self):\n ...",
"def zip_source(options):\n\n version = _get_local_version()\n\n source_dir = os.path.join('tmp', 'source')\n invest_bin_zip = os.path.join('tmp', 'invest-bin.zip')\n invest_dir = os.path.join('tmp', 'source', 'invest-bin')\n dist_dir = 'dist'\n try:\n dry('mkdir -p %s' % dist_dir, os.makedirs, source_dir)\n except OSError:\n # Folder already exists. Skipping.\n pass\n sh('hg archive %s' % invest_bin_zip)\n\n def _unzip(zip_uri, dest_dir):\n def _unzip_func():\n zip = zipfile.ZipFile(zip_uri)\n zip.extractall(dest_dir)\n dry('unzip -o %s -d %s' % (zip_uri, dest_dir), _unzip_func)\n\n _unzip(invest_bin_zip, source_dir)\n\n for dirname in map(lambda x: x.local_path, REPOS):\n if not dirname[0:3] in ['doc', 'src']:\n continue\n\n if dirname.startswith('src'):\n source_dir = os.path.join(invest_dir, 'src')\n elif dirname.startswith('doc'):\n source_dir = os.path.join(invest_dir, 'doc')\n\n projectname = dirname[4:] # remove the / as well.\n unzipped_dir = os.path.join(source_dir, projectname)\n print unzipped_dir\n try:\n dry('rm -r %s' % unzipped_dir, shutil.rmtree, unzipped_dir)\n except OSError:\n # when the source dir doesn't exist, that's ok.\n pass\n\n sh('hg archive -R %(repo)s tmp/%(zipname)s.zip' % {\n 'repo': dirname, 'zipname': projectname})\n\n zipfile_name = projectname + '.zip'\n _unzip(os.path.join('tmp', zipfile_name), source_dir)\n\n # leave off the .zip filename here. shutil.make_archive adds it based on\n # the format of the archive.\n archive_name = os.path.abspath(os.path.join('dist', 'InVEST-source-%s' % version))\n dry('zip -r %s %s.zip' % ('invest-bin', archive_name),\n shutil.make_archive, **{\n 'base_name': archive_name,\n 'format': 'zip',\n 'root_dir': source_dir,\n 'base_dir': '.'})",
"def process_zip(self, *args):\n self.unzip_files()\n self.process.process_files(self.temp_directory, *args)\n self.zip_files()",
"def unzipProject(self):\n if os.path.isdir(self.folder) == False:\n os.mkdir(self.folder)\n\n if self.myZipfile.endswith(\"zip\"):\n zipObject = zipfile.ZipFile(self.myZipfile, 'r')\n zipObject.extractall(path=self.folder)\n\n if self.myZipfile.endswith(\"tgz\"):\n print(self.getProcessOutput([\"tar\", '-xzvf', self.myZipfile, '-C', self.folder]))",
"def process_demo_package(self):\n # TODO: Move to zip file field?\n\n # Derive a directory name from the zip filename, clean up any existing\n # directory before unpacking.\n new_root_dir = self.demo_package.path.replace('.zip','')\n if isdir(new_root_dir):\n rmtree(new_root_dir)\n\n # Load up the zip file and extract the valid entries\n zf = zipfile.ZipFile(self.demo_package.file)\n valid_entries = Submission.get_valid_demo_zipfile_entries(zf) \n\n for zi in valid_entries:\n if type(zi.filename) is unicode:\n zi_filename = zi.filename\n else:\n zi_filename = zi.filename.decode('utf-8', 'ignore')\n\n # HACK: Normalize demo.html to index.html\n if zi_filename == u'demo.html':\n zi_filename = u'index.html'\n\n # Relocate all files from detected root dir to a directory named\n # for the zip file in storage\n out_fn = u'%s/%s' % (new_root_dir, zi_filename)\n out_dir = dirname(out_fn)\n\n # Create parent directories where necessary.\n if not isdir(out_dir):\n makedirs(out_dir.encode('utf-8'), 0775)\n\n # Extract the file from the zip into the desired location.\n fout = open(out_fn.encode('utf-8'), 'wb')\n copyfileobj(zf.open(zi), fout)",
"def _unzip_prism(self):\n zip_paths = list(self.target_dir.glob(\"*.zip\"))\n for zip_path in zip_paths:\n with zipfile.ZipFile(zip_path, \"r\") as zref:\n zref.extractall(self.target_dir)",
"def zip_sources(globs: Dict, filename: str):\n\n py_str, sources, dependencies = SourcePacker.gather_sources_and_dependencies(\n globs=globs\n )\n repo, branch, commit = SourcePacker.git_info(globs.get(\"__file__\"))\n cmd = \" \".join(sys.argv)\n\n with zipfile.ZipFile(filename, mode=\"w\") as zf:\n for source in sources:\n zf.write(source)\n\n zf.writestr(\"python_version.txt\", py_str)\n dep_str = \"\\n\".join(dependencies)\n zf.writestr(\"modules.txt\", dep_str)\n git_str = \"Repository: {}\\nBranch: {}\\nCommit: {}\".format(\n repo, branch, commit\n )\n zf.writestr(\"git_info.txt\", git_str)\n zf.writestr(\"command.txt\", cmd)",
"def archive_file(self) -> Path:\n return self.project.build_directory / (\n f\"{self.project.source_code.root_directory.name}.\"\n + (\"layer.\" if self.usage_type == \"layer\" else \"\")\n + f\"{self.runtime}.{self.project.source_code.md5_hash}.zip\"\n )",
"def _zip_package(self, package_path):\n return MuranoPackageManager(self.task)._prepare_package(package_path)",
"def rezip(self):\n\n exclude_files = ['.DS_Store', 'mimetype', 'iTunesMetadata.plist']\n parent_dir, dir_to_zip = os.path.split(self.zipdir)\n\n def trim(path):\n \"\"\"Prepare archive path\"\"\"\n zip_path = path.replace(parent_dir, \"\", 1)\n if parent_dir:\n zip_path = zip_path.replace(os.path.sep, \"\", 1)\n zip_path = zip_path.replace(dir_to_zip + os.path.sep, \"\", 1)\n return zip_path\n\n outfile = zipfile.ZipFile(self.filepath, \"w\",\n compression=zipfile.ZIP_DEFLATED)\n\n # ePub Zips need uncompressed mimetype-file as first file\n outfile.write(os.path.join(self.zipdir, 'mimetype'), 'mimetype',\n compress_type=0)\n\n for root, dirs, files in os.walk(self.zipdir):\n for file_name in files:\n if file_name in exclude_files:\n continue\n file_path = os.path.join(root, file_name)\n outfile.write(file_path, trim(file_path))\n # Also add empty directories\n if not files and not dirs:\n zip_info = zipfile.ZipInfo(trim(root) + \"/\")\n outfile.writestr(zip_info, \"\")\n outfile.close()\n self.delete_zip_folder()",
"def ziplib(self):\n temp_lib_dynload = self.prefix_lib / \"lib-dynload\"\n temp_os_py = self.prefix_lib / \"os.py\"\n\n self.remove(self.site_packages)\n self.lib_dynload.rename(temp_lib_dynload)\n self.copyfile(self.python_lib / \"os.py\", temp_os_py)\n\n zip_path = self.prefix_lib / f\"python{self.ver_nodot}\"\n shutil.make_archive(str(zip_path), \"zip\", str(self.python_lib))\n\n self.remove(self.python_lib)\n self.python_lib.mkdir()\n temp_lib_dynload.rename(self.lib_dynload)\n temp_os_py.rename(self.python_lib / \"os.py\")\n self.site_packages.mkdir()",
"def _create_zip(self, src, dest, name):\n shutil.make_archive(os.path.join(dest, name), 'zip', root_dir=src)",
"def zip_lambda():\n # Don't zip these files\n ignore_files = [\"controller.zip\", \"role_policy.json\"] \n \n # Zip the files and store them in a buffer\n zip_data = BytesIO()\n zipf = zipfile.ZipFile(zip_data, \"w\")\n for root, dirs, files in os.walk(\"lambda\"):\n for fl in files:\n if fl not in ignore_files:\n path_to_file = os.path.join(root, fl)\n file_key = path_to_file[7:]\n zipf.write(path_to_file, arcname=file_key)\n zipf.close()\n \n # Write the buffer to a variable and return it\n zip_data.seek(0)\n data = zip_data.read()\n zip_data.close()\n return data",
"def _repackage(self):\n file_suffix = f'_{APP_NAME}{self._file.extension}'\n filename = self._file.name.replace(self._file.extension, file_suffix)\n unlocked_filepath = os.path.join(APP_SAVE_DIR, filename)\n\n filepaths = self._get_file_listing(self._temp_processing_dir)\n with zipfile.ZipFile(unlocked_filepath,'w') as repackaged_zip:\n for filepath in filepaths:\n rel_filepath = filepath.replace(self._temp_processing_dir,'')\n repackaged_zip.write(filepath,arcname=rel_filepath)\n \n print('File repackaged...')",
"def final_report_zip():\n shutil.make_archive(os.path.join(props.project_dir, props.report_name), 'zip', props.final_report_dir)",
"def pack_zip(output_filename, sources):\n previous_dir = os.getcwd()\n if not isinstance(sources, (list, tuple)) and \\\n isinstance(sources, str):\n sources = [sources]\n zip_ds = zipfile.ZipFile(output_filename, 'w', zipfile.ZIP_DEFLATED)\n for source in sources:\n os.chdir(os.path.dirname(source))\n if os.path.isdir(source):\n for root, dirs, files in os.walk(os.path.basename(source)):\n for file in files:\n zip_ds.write(os.path.join(root, file))\n else:\n zip_ds.write(os.path.basename(source))\n zip_ds.close()\n os.chdir(previous_dir)",
"def scan_zip(self):\n md5 = handle_uploaded_file(self.file, '.zip')\n url = 'StaticAnalyzer/?name={}&type=zip&checksum={}'.format(\n self.file_name, md5)\n data = {\n 'url': url,\n 'status': 'success',\n 'hash': md5,\n 'scan_type': 'zip',\n 'file_name': self.file_name,\n }\n\n add_to_recent_scan(self.file_name, md5, data['url'])\n logger.info('Performing Static Analysis of Android/iOS Source Code')\n return data",
"def __init__(self, zipfile, entry=...):\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Insert a directory into the local file path for a layer archive. If required, this should be overridden by a subclass to handle language-specific requirements.
|
def insert_layer_dir(
file_path: Path, relative_to: Path # pylint: disable=unused-argument
) -> Path:
return file_path
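
For illustration only, a hypothetical Python-specific override might prefix layer files with the "python/" directory that AWS Lambda expects for Python layers (this subclass behavior is an assumption, not code from the project):

from pathlib import Path

def insert_layer_dir(file_path: Path, relative_to: Path) -> Path:
    """Rewrite <root>/foo/bar.py as <root>/python/foo/bar.py."""
    return relative_to / "python" / file_path.relative_to(relative_to)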
|
[
"def add_directory(self, local_dir):\n self.images.add_directory(os.path.abspath(local_dir))",
"def _add_root_dir(self, root_path: Path):\n dirname = os.path.split(root_path)[1] + '/'\n with open(self.log_path, mode='ab') as lf:\n lf.write(bytes(dirname, 'utf-8'))\n lf.write(b'\\n')",
"def PrependDirectory(directoryName, filename):\n return f'{directoryName}/{filename}'",
"def _assemble_file_path(source_dir: str, layer: str, identifier: str) -> str:\n if not source_dir.endswith(\"/\"):\n source_dir += \"/\"\n\n filepath = os.path.join(source_dir, identifier)\n\n filepath = os.path.join(filepath, layer)\n\n return filepath",
"def prepend_directory(self, directory):\n\n for i in self:\n i.name = directory + \"/\" + i.name\n\n self.insert(0, File(directory, None, True, False))",
"def add_dir(self, dir_path: Path):\n path_depth = self._get_path_depth(str(dir_path))\n dirname = os.path.split(str(dir_path))[1] + '/'\n log_line = '+- ' + dirname\n self._write_line_to_log(log_line, path_depth)",
"def archive_file(self) -> Path:\n return self.project.build_directory / (\n f\"{self.project.source_code.root_directory.name}.\"\n + (\"layer.\" if self.usage_type == \"layer\" else \"\")\n + f\"{self.runtime}.{self.project.source_code.md5_hash}.zip\"\n )",
"def zipdir(self):\n return os.path.join(self.location, self.trunc + '_unzipped')",
"def add_directory(path, newdir=\"Results\"): \n fname = os.path.basename(path)\n dname = os.path.dirname(path)\n new_dname = os.path.join(dname, newdir)\n if not os.path.exists(new_dname):\n os.makedirs(new_dname, exist_ok=False)\n return os.path.join(new_dname, fname)",
"def get_archive_install_dir(self) -> Path:\n # Prioritizes archive's install base dir over parent's install base dir\n install_base = self.arch_target_install_base or self.parent_target_install_base\n # Join archive's install dir to install base dir\n return Path(install_base.strip(r\"\\/\"), self.arch_target_install_dir.strip(r\"\\/\"))",
"def get_local_directory(self):\n \n # Gives Local Direcory path equivalent to URL Path in server\n rval = os.path.join(self.rootdir, self.domain)\n\n for diry in self.dirpath:\n if not diry: continue\n rval = os.path.abspath( os.path.join(rval, self.make_valid_filename(diry)))\n\n return os.path.normpath(rval)",
"def get_dir(self, file=''):\n return self.data_dir + file",
"def inc_dir(self):\n par = os.path.join(self.root, \"var\", \"inc\")\n if not os.path.exists(par):\n os.makedirs(par)\n return par",
"def dir_for_symbol(self, symbol):\n dir = symbol.lower()\n ltr = dir[0]\n par = os.path.join(self.root, \"etc\", \"sym\", ltr, dir)\n if not os.path.exists(par):\n os.makedirs(par)\n return par",
"def addPath(self, filename):\n\n # if default filename, then replace with script name and extension\n f = filename\n if f == \"default.log\":\n f = self.Script + \".log\"\n\n # if no path then add path\n if f.find(\"/\"):\n if self.__Output in [\"SYS\", \"BOTHSYS\"]:\n f = \"/var/log/\" + self.Script + \"/\" + f\n elif self.__Output in [\"USR\", \"BOTHUSR\"]:\n cwd = os.getcwd()\n f = cwd + \"/\" + self.Script + \"/\" + f\n\n return f",
"def AddFileSubdir( subdir, fname ):\n\n return os.path.join( os.path.dirname( fname ), subdir, os.path.basename( fname ) )",
"def remote_path_to_local_path(self, remote_path, local_root):\n return os.path.join(local_root, *remote_path.split('/'))",
"def add_path(self):\n path = self.add_local_entry.get()\n if path == '':\n return\n self.local_sources += [(len(self.local_sources), path)]\n source = self.local_sources[-1]\n self.local_section.tw.insert(parent='', index=source[0], iid=str(source[0]), values=source)\n self.local_section.tw.selection_toggle(str(source[0]))",
"def setDirectory(*args, **kwargs):\n \n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Iterate over the contents of the dependency directory. If ``gitignore_filter`` is set, it will be used to exclude files.
|
def iterate_dependency_directory(self) -> Iterator[Path]:
for child in self.project.dependency_directory.rglob("*"):
if child.is_dir():
continue # ignore directories
if self.gitignore_filter and self.gitignore_filter.match(child):
continue # ignore files that match the filter
yield child
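
A rough standalone equivalent, assuming simple fnmatch-style ignore patterns in place of the project's gitignore filter (function and parameter names are illustrative):

import fnmatch
from pathlib import Path
from typing import Iterator, Sequence

def iter_files(directory: Path, ignore_patterns: Sequence[str] = ()) -> Iterator[Path]:
    """Yield files under directory, skipping directories and ignored names."""
    for child in directory.rglob("*"):
        if child.is_dir():
            continue
        if any(fnmatch.fnmatch(child.name, pat) for pat in ignore_patterns):
            continue
        yield child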
|
[
"def package_files(self):\n for root, dirs, files in os.walk(os.path.join(self.path, 'p')):\n for basename in files:\n path = os.path.join(root, basename)\n if path != os.path.join(self.path, '.options'):\n yield os.path.join(self.path, path)",
"def walk_git_repository(repodir='.'):\n return _walk_repository(repodir, '.gitignore', parse_gitignore,\n match_gitignore)",
"def _iterate_files(self, ot: str) -> typing.Generator[dict, None, None]:\n path = self.path.joinpath(constants.PATHS[ot])\n if not path.exists():\n LOGGER.warning('No %s file found in project', ot)\n return\n for child in sorted(path.iterdir(), key=lambda p: str(p)):\n if child.is_dir():\n for s_child in sorted(child.iterdir(), key=lambda p: str(p)):\n if yaml.is_yaml(s_child):\n yield self._preprocess_definition(\n ot, s_child.parent.name,\n s_child.name.split('.')[0], yaml.load(s_child))\n elif yaml.is_yaml(child):\n yield self._preprocess_definition(\n ot, child.name.split('.')[0], None, yaml.load(child))",
"def __iter__(self):\n return self.list_files(only=self.only, exclude=self.exclude,\n recursive=self.recursive)",
"def _discover_files(self, files_or_modules: Sequence[str]) -> Iterator[str]:\n for something in files_or_modules:\n if os.path.isdir(something) and not os.path.isfile(\n os.path.join(something, \"__init__.py\")\n ):\n skip_subtrees: list[str] = []\n for root, _, files in os.walk(something):\n if any(root.startswith(s) for s in skip_subtrees):\n # Skip subtree of already discovered package.\n continue\n\n if _is_ignored_file(\n root,\n self.config.ignore,\n self.config.ignore_patterns,\n self.config.ignore_paths,\n ):\n skip_subtrees.append(root)\n continue\n\n if \"__init__.py\" in files:\n skip_subtrees.append(root)\n yield root\n else:\n yield from (\n os.path.join(root, file)\n for file in files\n if file.endswith(\".py\")\n )\n else:\n yield something",
"def gitlist2():\n local('git ls-files -i -X .gitignore')",
"def traverse( self, directory, whitelist = lambda f: True, blacklist = lambda g: False ):\n if blacklist( directory ):\n return\n dirs = [ directory ]\n while dirs:\n path = dirs.pop()\n for f in os.listdir( path ):\n f = os.path.join( path, f )\n z = f.replace( directory + os.sep, \"\" )\n if blacklist( z ):\n continue\n if os.path.isdir( f ):\n dirs.append( f )\n elif os.path.isfile( f ) and whitelist( z ):\n yield f",
"def _file_list(self, directory, excluded=\"\"):\n for dirname, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if filename not in excluded:\n yield os.path.join(dirname, filename)",
"def _contents(self, dir):\n def skip(path):\n return path.is_dir() or self._ignore(path)\n\n return [x for x in dir.rglob('*') if not skip(x)]",
"def dockerfile_globs(self, dockerfile='Dockerfile'):\n dockerfile_path = self.workdir.join(dockerfile)\n with dockerfile_path.open() as handle:\n for line in handle:\n if line[:4] == 'ADD ':\n add_value = line[4:]\n try:\n for path in json.loads(add_value)[:-1]:\n yield path\n\n except ValueError:\n add_file, _ = add_value.split(' ', 1)\n yield add_file\n\n yield dockerfile\n yield '.dockerignore'",
"def gitignore(self):\n gitignore = os.path.join(self.lib_dir, '.gitignore')\n cmd = 'git config --global core.excludesfile {}'.format(gitignore)\n self.run_cmd(cmd)",
"def dir_iter (* paths, ** kw) :\n for base, dirs, files, filter in _walk (* paths, ** kw) :\n for d in dirs :\n if filter is None or filter (d) :\n yield os.path.join (base, d) if base else d",
"def _iter_module_files():\n for module in list(sys.modules.values()):\n filename = getattr(module, '__file__', None)\n if filename:\n if filename[-4:] in ('.pyo', '.pyc'):\n filename = filename[:-1]\n yield filename",
"def process_directory():\n repo_name = Path.cwd().name\n repo_work = WORK_DIR / repo_name\n repo_work.mkdir(parents=True, exist_ok=True)\n repo_urls = set()\n if (js_reqs := Path(\"package-lock.json\")).exists():\n shutil.copyfile(js_reqs, repo_work / \"package-lock.json\")\n with change_dir(repo_work):\n repo_urls.update(check_js_dependencies())\n if (py_reqs := find_py_reqs()):\n shutil.copyfile(py_reqs, repo_work / \"base.txt\")\n with change_dir(repo_work):\n repo_urls.update(check_py_dependencies())\n return repo_urls",
"def gitlist():\n local('git ls-files -i --exclude-standard')",
"def get_files_to_check(project_dir, exclude_patterns):\n git = Git(project_dir)\n included_files = git.ls_files().split('\\n')\n selected_files = exclude_file_paths(included_files, exclude_patterns)\n\n return selected_files",
"def all_paths():\n repo_root = os.path.abspath(os.path.join(INFRABOTS_DIR, os.pardir, os.pardir))\n output = subprocess.check_output(['git', 'ls-files'], cwd=repo_root).rstrip()\n return output.splitlines()",
"def dir_iterator(srcdir: str):\n for fn in os.listdir(srcdir):\n if fn.endswith('.json'):\n with open(os.path.join(srcdir, fn), 'r') as f:\n yield json.load(f)",
"def traverse_commits(self) -> Generator[Commit, None, None]:\n logger.info('Git repository in {}'.format(self.git_repo.path))\n all_cs = self._apply_filters_on_commits(self.git_repo.get_list_commits())\n\n if not self.reversed_order:\n all_cs.reverse()\n\n for commit in all_cs:\n logger.info('Commit #{} in {} from {}'\n .format(commit.hash, commit.author_date, commit.author.name))\n\n if self._is_commit_filtered(commit):\n logger.info('Commit #{} filtered'.format(commit.hash))\n continue\n\n yield commit"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize a deployment package. Use this instead of instantiating the class directly; it automatically accounts for a deployment package object that already exists in S3.
|
def init(
cls,
project: _ProjectTypeVar,
usage_type: Literal["function", "layer"] = "function",
) -> DeploymentPackage[_ProjectTypeVar]:
s3_obj = DeploymentPackageS3Object(project, usage_type)
if s3_obj.exists:
if s3_obj.runtime == project.runtime:
return s3_obj
LOGGER.warning(
"runtime of deployment package found in S3 (%s) does not match "
"requirement (%s); deleting & recreating...",
s3_obj.runtime,
project.runtime,
)
s3_obj.delete()
return cls(project, usage_type)
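
Hypothetical usage, assuming "project" is an already-configured project object exposing runtime and S3 settings (illustrative only):

# Prefer the classmethod over the constructor so an existing S3 object
# with a matching runtime is reused instead of being rebuilt.
package = DeploymentPackage.init(project, usage_type="layer")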
|
[
"def __init__ (self):\n # Create a connection to S3\n self.handle = self.connect()",
"def _set_s3(self):\n logger.info(\"Setting up s3 ...\")\n\n cluster_name_id = AXClusterId().get_cluster_name_id()\n\n self._bucket_name = AXClusterDataPath(cluster_name_id).bucket()\n self._bucket = Cloud().get_bucket(self._bucket_name)\n artifact_prefix = AXClusterDataPath(cluster_name_id).artifact()\n self._log_s3_prefix = artifact_prefix\n\n self._bucket_ax_is_external = AXLogPath(cluster_name_id).is_external()\n self._bucket_name_ax = AXLogPath(cluster_name_id).bucket()\n self._bucket_ax = Cloud().get_bucket(self._bucket_name_ax)\n artifact_prefix_ax = AXLogPath(cluster_name_id).artifact()\n\n self._log_s3_prefix_ax = artifact_prefix_ax\n\n assert self._bucket.exists(), \"S3 bucket {} DOES NOT exist\".format(self._bucket_name)\n assert self._bucket_ax.exists(), \"S3 bucket {} DOES NOT exist\".format(self._bucket_name_ax)\n logger.info(\"Using S3 bucket %s, with log prefix %s\", self._bucket.get_bucket_name(), self._log_s3_prefix)\n logger.info(\"Using S3 bucket %s, with log prefix %s for AX\", self._bucket_ax.get_bucket_name(), self._log_s3_prefix_ax)",
"def __init__(self, s3_config: Union[dict, None]):\n if s3_config is not None:\n if isinstance(s3_config, s3fs.S3FileSystem):\n s3 = s3_config\n else:\n key = s3_config['accessKey']\n secret = s3_config['accessSecret']\n s3 = s3fs.S3FileSystem(key=key, secret=secret)\n else:\n s3 = None\n self.s3 = s3",
"def init():\n try:\n\tinit1()\n except:\n print >>sys.stderr, INIT % {'key_dir': join(expanduser('~'), '.s3')}\n sys.exit(1)",
"def setup_s3():\n s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))\n logging.info('Successfully initialized S3 client')\n return s3",
"def init(ctx, bucket=None):\n session = boto3.Session()\n s3 = boto3.client('s3')\n config={}\n\n if bucket == None and os.environ.get('KVS3_BUCKET') == None:\n bucket_init = input('Enter s3 key value store bucket name: ')\n s3_name_requirements = re.compile(\"^[a-z0-9]{1}[a-z0-9\\-\\.]{1,61}[a-z0-9\\.]{1}$\")\n if s3_name_requirements.match(bucket_init):\n config['bucket'] = bucket_init\n with open(INIT_FILE, 'w') as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n else:\n print('kvs3: invalid bucket name')\n sys.exit(1)\n\n validate(s3, bucket)",
"def connect_s3(self):\n self.out('- Connecting to S3 and making bucket.\\n')\n self.s3 = boto.connect_s3()\n self.bucket = self.s3.create_bucket(self.bucket_name)\n self.bucket = self.s3.get_bucket(self.bucket_name)\n self.bucket.set_acl(self.default_acl)\n self.bucket.set_cors(self.default_cors)",
"def __init__(self):\n self.conn = Connection().cloudformation_connection()\n self.web_bucket_template = '{\"AWSTemplateFormatVersion\":\"2010-09-09\",\"Description\":\"AWS CloudFormation Sample Template S3_Website_Bucket_With_Retain_On_Delete: Sample template showing how to create a publicly accessible S3 bucket configured for website access with a deletion policy of retail on delete. **WARNING** This template creates an S3 bucket that will NOT be deleted when the stack is deleted. You will be billed for the AWS resources used if you create a stack from this template.\",\"Resources\":{\"S3Bucket\":{\"Type\":\"AWS::S3::Bucket\",\"Properties\":{\"AccessControl\":\"PublicRead\",\"WebsiteConfiguration\":{\"IndexDocument\":\"index.html\",\"ErrorDocument\":\"error.html\"}},\"DeletionPolicy\":\"Retain\"}},\"Outputs\":{\"WebsiteURL\":{\"Value\":{\"Fn::GetAtt\":[\"S3Bucket\",\"WebsiteURL\"]},\"Description\":\"URL for website hosted on S3\"},\"S3BucketSecureURL\":{\"Value\":{\"Fn::Join\":[\"\",[\"https://\",{\"Fn::GetAtt\":[\"S3Bucket\",\"DomainName\"]}]]},\"Description\":\"Name of S3 bucket to hold website content\"}}}'",
"def init1():\n home_dir = expanduser('~')\n init0(key_id = file(join(home_dir, '.s3', 'key_id')).read().strip(),\n\t secret = file(join(home_dir, '.s3', 'key')).read().strip())",
"def __init__(self, json_service_account: str = JSON_KEYS_SERVICE_ACCOUNT,\n bucket_name: str = DISEASE_HISTORY_FILES_NAME):\n self.client = storage.Client.from_service_account_json(json_service_account)\n\n try:\n self.bucket = self.client.get_bucket(bucket_name)\n except NotFound:\n self.bucket = self.client.create_bucket(bucket_name)",
"def _deploy_to_s3():\n s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt put gzip/ %s'\n s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude \"*\" --include-from gzip_types.txt put gzip/ %s'\n\n for bucket in env.s3_buckets:\n env.s3_bucket = bucket\n local(s3cmd % ('s3://%(s3_bucket)s/' % env))\n local(s3cmd_gzip % ('s3://%(s3_bucket)s/' % env))",
"def __init__(self, upload_parameters):\n\n self.time_str = upload_parameters.time_str\n self.model_path = upload_parameters.model_path\n self.excel_data_df = upload_parameters.get_excel_dataframe()\n self.aws_helper = upload_parameters.get_aws_helper()\n self.metadata_bucket = upload_parameters.metadata_bucket\n self.model_prediction_bucket = upload_parameters.model_prediction_bucket\n self.aws_object_key_prefix = upload_parameters.aws_object_key_prefix\n\n self._do_live_uploads = self.aws_helper is not None",
"def __init__(__self__,\n resource_name: str,\n args: Optional[EcsDeploymentSetArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(self):\r\n self.postgres = PostgreSQL()\r\n self.couch_query = Queries()\r\n self.aws3 = AwsS3()\r\n super(DeviceImages, self).__init__()",
"def deploy_to_s3(self):\r\n self.tempdir = tempfile.mkdtemp('s3deploy')\r\n\r\n for keyname, absolute_path in self.find_file_paths():\r\n self.s3_upload(keyname, absolute_path)\r\n\r\n shutil.rmtree(self.tempdir, True)\r\n return True",
"def create_bucket(self):\n AWSApi.instance().s3.create_bucket(bucket_name=self.name, region=self.region)",
"def __init__(\n self,\n bucket,\n key,\n artifact_type=ArtifactType.MISC,\n local_folder=None,\n is_copy=False,\n ):\n self._bucket = bucket\n self._key = key\n self._local_folder = local_folder\n self._type = artifact_type\n self._is_copy = is_copy",
"def initialize():\n # Source bucket list\n if 'SOURCE_BUCKETS' in os.environ:\n source_bucket_list = os.getenv('SOURCE_BUCKETS')\n bucket_list = [x.strip() for x in source_bucket_list.split(',')]\n else:\n bucket_list = None\n\n # Target buckets\n if 'TARGET_BUCKET' in os.environ:\n target_bucket = os.getenv('TARGET_BUCKET')\n else:\n target_bucket = None\n\n # Ignore buckets\n global IGNORE_BUCKETS\n if 'IGNORE_BUCKETS' in os.environ:\n ignore_bucket_list = os.getenv('IGNORE_BUCKETS')\n IGNORE_BUCKETS = [x.strip() for x in ignore_bucket_list.split(',')]\n\n return (bucket_list, target_bucket)",
"def __init__(self, sdk_root):\n if not os.path.isdir(sdk_root):\n raise ValueError('The given Cloud SDK root does not exist: [{0}]'\n .format(sdk_root))\n\n self.__sdk_root = console_attr.DecodeFromInput(sdk_root)\n self._state_directory = os.path.join(sdk_root,\n InstallationState.STATE_DIR_NAME)\n self.__backup_directory = os.path.join(self._state_directory,\n InstallationState.BACKUP_DIR_NAME)\n self.__trash_directory = os.path.join(self._state_directory,\n InstallationState.TRASH_DIR_NAME)\n\n self.__sdk_staging_root = (os.path.normpath(self.__sdk_root) +\n InstallationState.STAGING_ROOT_SUFFIX)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List of compatible runtimes.
|
def compatible_runtimes(self) -> Optional[List[str]]:
if self.META_TAGS["compatible_runtimes"] in self.object_tags:
return self.object_tags[self.META_TAGS["compatible_runtimes"]].split("+")
return None
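
A small sketch of the "+"-delimited tag encoding this property assumes; the helper names are hypothetical:

from typing import List, Optional

def encode_runtimes_tag(runtimes: Optional[List[str]]) -> Optional[str]:
    """Join runtimes into a single S3 tag value, e.g. 'python3.9+python3.10'."""
    return "+".join(runtimes) if runtimes else None

def decode_runtimes_tag(tag_value: str) -> List[str]:
    """Split a stored tag value back into the runtime list."""
    return tag_value.split("+")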
|
[
"def compatible_runtimes(self) -> Optional[List[str]]:\n return self.project.compatible_runtimes",
"async def runtimes(self) -> List[Runtime]:\n runtimes = await self._http_session.get_response(\"get\", \"runtimes/\")\n runtime_list = []\n for runtime in runtimes:\n runtime_list.append(\n Runtime(\n language=runtime.get(\"language\"),\n aliases=runtime.get(\"aliases\"),\n version=runtime.get(\"version\"),\n runtime=runtime.get(\"runtime\")\n )\n )\n self._runtimes = runtime_list\n return runtime_list",
"def get_dotnet_runtimes() -> List[dotnet_const.Runtime]:\n runtimes = []\n for line in check_output([get_exe_name(\"dotnet\"), \"--list-runtimes\"]).decode(\"utf-8\").splitlines():\n name, version, path = line.split(\" \", 2)\n path = join(path[1:-1], version)\n runtimes.append(dotnet_const.Runtime(name=name, version=version, path=path))\n return runtimes",
"def GetSupportedEngines():\r\n pass",
"def _get_supportedProductTypes(self) -> \"std::vector< std::string,std::allocator< std::string > >\" :\n return _core.Application__get_supportedProductTypes(self)",
"def supported_types(self) -> List[BundleType]:",
"def runtime_packages(self):\n return self.packages | self.depends",
"def list_available_drivers():\n return drivers.available_drivers()",
"def available_engines() -> Sequence[\"DiffEngine\"]:\n try:\n return tuple(getattr(DiffEngine, \"_available_engines\"))\n except AttributeError:\n result = []\n try:\n result.append(DiffEngine.create(name=\"native\"))\n except ImportError:\n pass\n result.append(DiffEngine.create(name=\"plain\", hash_optimization=True))\n result.append(DiffEngine.create(name=\"plain\", hash_optimization=False))\n result = tuple(result)\n setattr(DiffEngine, \"_available_engines\", result)\n return result",
"def _get_runtime_supported_types(opset_version: target) -> Set[type]:\n supported_types = {types.fp16, types.fp32, types.int32, types.str, types.bool}\n if opset_version >= target.iOS17:\n supported_types.update({types.int8, types.uint8, types.int16, types.uint16})\n return supported_types",
"def compatible_architectures(self) -> Optional[List[str]]:\n return self.project.compatible_architectures",
"def SupportedPlatforms(self):\n return self.platform_infos.keys()",
"def platform() -> list:\n if GetOS.OS == \"Linux\":\n x = InformationManager(SysFiles.ver.value)\n return x.openF().read().split()[0]\n elif GetOS.OS == \"darwin\":\n x = get_output(\"sw_vers\")\n return x.split()[1:3]",
"def supported_tags(self, force_manylinux=True):\n return _get_supported(\n platform=self.platform,\n impl=self.impl,\n version=self.version,\n abi=self.abi,\n force_manylinux=force_manylinux\n )",
"def get_backend_list():\n lst = [NumpyBackend(), ]\n\n if torch:\n lst.append(TorchBackend())\n\n if jax:\n lst.append(JaxBackend())\n\n return lst",
"def test_determine_runtime():\n runtime = determine_runtime()\n try:\n assert \"docker\" in runtime or \"podman\" in runtime\n except ContainerRuntimeException:\n pass",
"def list_runtimes(self, docker_image_name='all'):\n if docker_image_name == 'default':\n docker_image_name = self._get_default_runtime_image_name()\n runtimes = []\n actions = self.cf_client.list_actions(self.package)\n\n for action in actions:\n action_image_name, memory = self._unformat_action_name(action['name'])\n if docker_image_name == action_image_name or docker_image_name == 'all':\n runtimes.append((action_image_name, memory))\n return runtimes",
"def from_simctl_info(info: List[Dict[str, Any]]) -> List[\"Runtime\"]:\n runtimes = []\n for runtime_info in info:\n runtimes.append(Runtime(runtime_info))\n return runtimes",
"def runtime_info():\n return True, runtime.runtime_info()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Response from HeadObject API call.
|
def head(self) -> Optional[HeadObjectOutputTypeDef]:
try:
return self.bucket.client.head_object(
Bucket=self.bucket.name, Key=self.object_key
)
except self.bucket.client.exceptions.ClientError as exc:
status_code = exc.response.get("ResponseMetadata", {}).get(
"HTTPStatusCode", 0
)
if status_code == 404:
LOGGER.verbose(
"%s not found",
self.bucket.format_bucket_path_uri(key=self.object_key),
)
return None
if status_code == 403:
            # we can't handle this error, but we can enhance the error message
LOGGER.error(
"access denied for object %s",
self.bucket.format_bucket_path_uri(key=self.object_key),
)
raise
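
The same pattern with a plain boto3 client, outside the wrapper class (bucket and key are placeholders):

import logging
import boto3
from botocore.exceptions import ClientError

LOGGER = logging.getLogger(__name__)

def head_object_or_none(bucket: str, key: str):
    """Return HeadObject metadata, or None when the key does not exist."""
    client = boto3.client("s3")
    try:
        return client.head_object(Bucket=bucket, Key=key)
    except ClientError as exc:
        status = exc.response.get("ResponseMetadata", {}).get("HTTPStatusCode", 0)
        if status == 404:
            LOGGER.debug("s3://%s/%s not found", bucket, key)
            return None
        raise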
|
[
"def do_HEAD(self):\n # create request object\n self.create_request(\"get\")\n f = self.on_request(\"get\")\n if f:\n f.close()",
"def head_object(self, container_name, key):\n url = '/'.join([self.endpoint, container_name, key])\n try:\n res = self.session.head(url)\n if res.status_code == 200:\n return res.headers\n elif res.status_code == 404:\n raise StorageNoSuchKeyError(container_name, key)\n else:\n raise Exception('{} - {}'.format(res.status_code, key))\n except Exception as e:\n raise StorageNoSuchKeyError(container_name, key)",
"def do_HEAD(self):\n self.doGET(True)",
"def head(self, url, url_params=empty.dict, headers=empty.dict, timeout=None, **params):\n return self.request('HEAD', url=url, headers=headers, timeout=timeout, **params)",
"def headRequest(group, index):",
"def testGenerateHeadUrl(self):\n self._RunAsync(self.object_store.Put, self.key, 'foo')\n\n url = self.object_store.GenerateUrl(self.key, method='HEAD', expires_in=100)\n response = httpclient.HTTPClient().fetch(url, method='HEAD', request_timeout=3.0)\n self.assertEqual(response.code, 200)\n self.assertEqual(response.headers['Content-Length'], '3')",
"def get_blob_meta(objecturl, logprefix=\"\", **kwargs):\n bucketname, keyname = s3_split_url(objecturl)\n logprefix = logprefix + \" \" if logprefix else logprefix\n logger.debug(\"%sfetching meta for URL: %s\", logprefix, objecturl)\n s3 = boto3.client('s3')\n try:\n # if 'RequestPayer' not in kwargs:\n # kwargs['RequestPayer'] = 'requester'\n\n head_res = s3.head_object(Bucket=bucketname, Key=keyname, **kwargs)\n except ClientError as clierr:\n if clierr.response['Error']['Code'] == '404':\n raise NoSuchFile(objecturl)\n logger.error(\"%scould not fetch URL (%s): %s\", logprefix, repr(clierr.response['Error']['Code']), objecturl,\n exc_info=clierr)\n raise\n return head_res",
"def metaHead(self):\n if hasattr(self._meta, 'head'):\n print(self._meta.head())\n else:\n print(\"No metadata view set.\")",
"def get_obj_stats(self, bucket_name_, prefix_, obj_name_):\n\n stats = {}\n\n try:\n obj_header = self.client.head_object(\n Bucket=bucket_name_, Key=prefix_ + obj_name_)\n\n stats[\"size_bytes\"] = obj_header[\"ContentLength\"]\n stats[\"size_mb\"] = obj_header[\"ContentLength\"] / 1048576\n stats[\"last_modified\"] = obj_header[\"LastModified\"]\n\n except ClientError as e:\n logging.info(\n f\"There was an error retrieving stats for {obj_name_}. {e} \")\n\n return stats",
"def get_meta(self):\n if not self.has_meta:\n try:\n self.head_object = self.get_s3_connection().head_object(Bucket=self.get_bucket(), Key=self.get_key())\n self.has_meta = True\n except Exception as e:\n if self.region is None:\n logging.debug('Could not get meta-data of S3Object. Region not set so assuming incorrect region.')\n logging.debug('Try to determine bucket region, and try to reconfigure the connection.')\n self.region = 'unknown'\n self.connect_to_bucket_region()\n return self.get_meta()\n else:\n region_set_no_metadata = 'Could not get meta-data of S3Object and region was {r}.'\n raise Exception(region_set_no_metadata.format(r=str(self.region))) from e\n return self.head_object",
"def http_head(url):\n req = Request(url)\n req.get_method = lambda: 'HEAD'\n try:\n resp = urlopen(req)\n return resp.getcode()\n except HTTPError as exc:\n return exc.getcode()",
"def test_getPageHead(self):\n def _getPage(method):\n return getPage(self.getURL(\"file\"), method=method)\n return defer.gatherResults([\n _getPage(\"head\").addCallback(self.assertEqual, \"\"),\n _getPage(\"HEAD\").addCallback(self.assertEqual, \"\")])",
"def get_headers(self, url_object):\n if \"etag\" or \"last-modified\" in url_object.headers.dict:\n etag = url_object.headers.get(\"etag\")\n lm = url_object.headers.get(\"last-modified\")\n return {\"etag\": etag, \"last-modified\": lm}\n\n self.logger.error(\"GetHeaders: Error occurred while obtaining etag and last-modified headers\")",
"def retrieveHeaders(self):\n if not self.headers:\n oururl = urlparse.urlparse(self.url)\n if oururl.scheme == \"http\":\n conn = httplib.HTTPConnection(oururl.netloc)\n else:\n conn = httplib.HTTPSConnection(oururl.netloc)\n conn.request(\"HEAD\", oururl.path)\n response = conn.getresponse()\n self.headers = dict(response.getheaders())\n if self.downloaded_filesize < 0:\n try:\n content_length = self.headers[\"content-length\"]\n self.downloaded_filesize = int(content_length)\n except KeyError:\n self.logger.log(\"url returned no content-length: %r\" % (self.url))\n except ValueError:\n self.logger.log(\"url returned invalid filesize: %r\" % content_length)\n if not self.objectage:\n try:\n age = self.headers['last-modified']\n self.objectage = datetime.strptime(age, '%a, %d %b %Y %H:%M:%S %Z')\n except KeyError:\n self.logger.log('url returned no last-modified: %r' % (self.url))\n except ValueError:\n self.logger.log('url returned invalid last-modified: %r' % age)\n # message-length should be handled too:\n # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.4",
"def test_head_request_response_upload_offset_when_resource_exists(api):\n resp = request_creation(100, api)\n assert resp.status_code == 201\n resource_path = resp.headers['Location']\n assert resource_path is not None\n\n resp = api.requests.head(resource_path)\n\n assert resp.status_code == 200\n assert resp.headers['Upload-Offset'] == '0'\n assert resp.headers['Upload-Length'] == '100'\n assert resp.headers['Tus-Resumable'] == '1.0.0'\n assert resp.headers['Cache-Control'] == 'no-store'",
"def get_headphones(headphones_id):\n headphones_query = headphones.get_headphones(headphones_id)\n if not headphones_query:\n return custom_response({'error': 'headphones not found'}, 404)\n \n data = headphones_schema.dump(headphones_query)\n return custom_response(data, 200)",
"def getHead(self) -> \"SoNode *\":\n return _coin.SoPath_getHead(self)",
"def test_head_meta(self, client, article, staffer_profile):\n response = client.get(article.get_url())\n # first published in 2019; in GMT\n assertContains(\n response,\n '<meta name=\"article.published_time\" content=\"%s\">'\n % article.first_published_at.isoformat(),\n html=True,\n )\n # most recently published/modified in 2020; in GMT\n assertContains(\n response,\n '<meta name=\"article.modified_time\" content=\"%s\">'\n % article.last_published_at.isoformat(),\n html=True,\n )\n # profile URL for staffer (postdoc doesn't have profile)\n assertContains(\n response,\n '<meta name=\"article.author\" content=\"%s\"/>' % staffer_profile.get_url(),\n html=True,\n )",
"def on_head(self, req, resp, *, file_id):\n upload_data = db.get_by_id(UUID(file_id))\n\n _set_common_headers(resp)\n\n if upload_data is None:\n resp.status_code = api.status_codes.HTTP_404\n return\n\n resp.headers[headers.UPLOAD_OFFSET] = str(upload_data.upload_offset)\n if upload_data.upload_metadata is not None:\n resp.headers[headers.UPLOAD_METADATA] = to_metadata_header(upload_data.upload_metadata)\n\n if upload_data.upload_concat is not None:\n resp.headers[headers.UPLOAD_CONCAT] = upload_data.upload_concat\n\n if upload_data.upload_length is None:\n resp.headers[headers.UPLOAD_DEFER_LENGTH] = str(1)\n else:\n resp.headers[headers.UPLOAD_LENGTH] = str(upload_data.upload_length)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update tags of the S3 object.
|
def update_tags(self) -> None:
new_tags = self.build_tag_set(url_encoded=False)
if new_tags == self.object_tags:
LOGGER.debug(
"%s tags don't need to be updated",
self.bucket.format_bucket_path_uri(key=self.object_key),
)
return
self.bucket.client.put_object_tagging(
Bucket=self.bucket.name,
Key=self.object_key,
Tagging={"TagSet": [{"Key": k, "Value": v} for k, v in new_tags.items()]},
)
LOGGER.info("updated S3 object's tags")
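
A minimal standalone version of the same call, assuming a plain dict of tags; bucket, key, and the helper name are placeholders:

import boto3

def put_tags(bucket: str, key: str, tags: dict) -> None:
    """Replace the object's tag set with the given key/value pairs."""
    boto3.client("s3").put_object_tagging(
        Bucket=bucket,
        Key=key,
        Tagging={"TagSet": [{"Key": k, "Value": v} for k, v in tags.items()]},
    )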
|
[
"def update_tags(self):\n raise NotImplementedError",
"def set_tags(self, url_prefix, microver, instance, tags):\n try:\n for tag in tags:\n instance.add_tag(self.conn, tag)\n except AttributeError:\n # Try a low-level access if SDK version is old\n for tag in tags:\n response = self.conn.put(\n self._get_tag_url(url_prefix, instance, tag),\n microversion=microver)\n if response.status_code not in [201, 204]:\n self.fail_json(\n msg='API returned something bad %s' % response.reason)\n return self.fetch_tags(url_prefix, microver, instance)",
"def update_s3(self, storage_provider_id, name=None, bucket=None, access_key=None, secret_access_key=None,\n endpoint=None, region=None, signature_version=None):\n\n config = {}\n if bucket:\n config['bucket'] = bucket\n if access_key:\n config['accessKey'] = access_key\n if secret_access_key:\n config['secretAccessKey'] = secret_access_key\n if endpoint is not None:\n config['endpoint'] = endpoint\n if region is not None:\n config['region'] = region\n if signature_version is not None:\n config['signatureVersion'] = signature_version\n\n storage_provider = models.StorageProvider(\n name=name,\n config=config,\n )\n\n repository = self.build_repository(repositories.UpdateStorageProvider)\n return repository.update(storage_provider_id, storage_provider)",
"def put(self, tag, urls):\n return self._upload('%s/ddfs/tag/%s' % (self.master, tagname(tag)),\n StringIO(json.dumps(urls)))",
"def upload_to_s3(file_name, bucket, object_name):\n print(file_name, bucket, object_name)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n return response",
"def save(client, buf, bucket, key, metadata, source_version=None):\n # Get Object Settings\n request_payer_args, _ = get_requester_payment(client, bucket)\n object_info_args, _ = get_object_info(client, bucket, key, source_version)\n tagging_args, _ = get_object_tags(client, bucket, key, source_version)\n acl_args, acl_resp = get_object_acl(client, bucket, key, source_version)\n extra_args = {\n **request_payer_args,\n **object_info_args,\n **tagging_args,\n **acl_args,\n **{\"Metadata\": metadata},\n }\n logger.info(\"Object settings: %s\", extra_args)\n # Write Object Back to S3\n logger.info(\"Saving updated object to s3://%s/%s\", bucket, key)\n resp = client.upload_fileobj(buf, bucket, key, ExtraArgs=extra_args)\n new_version_id = resp[\"VersionId\"]\n logger.info(\"Object uploaded to S3\")\n # GrantWrite cannot be set whilst uploading therefore ACLs need to be restored separately\n write_grantees = \",\".join(get_grantees(acl_resp, \"WRITE\"))\n if write_grantees:\n logger.info(\"WRITE grant found. Restoring additional grantees for object\")\n client.put_object_acl(\n Bucket=bucket,\n Key=key,\n VersionId=new_version_id,\n **{\n **request_payer_args,\n **acl_args,\n \"GrantWrite\": write_grantees,\n },\n )\n logger.info(\"Processing of file s3://%s/%s complete\", bucket, key)\n return new_version_id",
"def _save(self, s3_prefix):\n bucket_name, prefix = split_s3_path(s3_prefix)\n bucket = self.s3_conn.get_bucket(bucket_name)\n self._compute_percentages()\n self.stats['last_updated'] = datetime.now().isoformat()\n key = self._key(bucket, prefix)\n key.set_contents_from_string(json.dumps(self.stats))",
"def upload_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.put_object(\n Body=os.environ['TF_VAR_elastic_beanstalk_s3_key'],\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except Exception as e:\n raise e",
"def sync_buckets():\n\n client = boto3.client(\"s3\")\n try:\n buckets = client.list_buckets()[\"Buckets\"]\n except Exception as err:\n logger.error(f\"Problem listing buckets in S3: {err}\")\n raise\n\n buckets = [b for b in buckets if f\"-{settings.STAGE}-sd-\" in b[\"Name\"]]\n\n logger.info(f\"Found {len(buckets)} buckets to update\")\n for bucket_info in buckets:\n try:\n bucket = Bucket.objects.get(name=bucket_info[\"Name\"])\n except Bucket.DoesNotExist:\n logger.info(f\"Found new bucket {bucket_info['Name']}\")\n bucket = Bucket(\n name=bucket_info[\"Name\"],\n created_on=bucket_info[\"CreationDate\"],\n organization=Organization.objects.earliest(\"created_on\"),\n )\n\n link_study(bucket)\n\n bucket.deleted = False\n\n bucket.save()\n\n db_buckets = {b.name for b in Bucket.objects.filter(deleted=False).all()}\n deleted_buckets = db_buckets - {b[\"Name\"] for b in buckets}\n logger.info(f\"Found {len(buckets)} buckets that are no longer in S3\")\n for bucket in deleted_buckets:\n bucket = Bucket.objects.get(name=bucket)\n bucket.deleted = True\n bucket.save()",
"def updateTags(self, channel, tags):\r\n for key, value in tags.items():\r\n if self.tagExists(channel, key):\r\n self.changeTagValue(channel, key, value)\r\n else:\r\n self.createTag(channel, key, value)",
"def upload_dictionary(self, bucket_name, file_name, dictionary):\n s3_object = self.s3.Object(bucket_name, file_name)\n s3_object.put(Body=json.dumps(dictionary))",
"def update_tags(instance, **kwargs):\n old_tags = list(instance.tags.all())\n for token in instance.content.tags:\n tag, t_is_new = Tag.objects.get_or_create(content=token,\n defaults={'creator':instance.author})\n\n taggedNote, tn_is_new = TaggedNote.objects.get_or_create(\n note=instance, tag=tag,\n defaults={'tagged_by':instance.author})\n if tag in old_tags:\n # old tags that remain in the content are removed from\n # the `old_tags` list, which in the end contains only \n # tags that are not longer used by `instance`\n old_tags.remove(tag)\n\n for tag in old_tags:\n taggedNote = TaggedNote.objects.get(note=instance,\n tag=tag)\n taggedNote.delete()",
"def _set_object_tagset(self, filename, tagset):\n attribute_block = self._object_attribute_block(filename)\n attribute_block.set_tagset(tagset)",
"def upload_to_S3(bucket, key, file_object):\n k = Key(bucket)\n k.key = key\n k.set_contents_from_file(file_object)\n k.set_acl('public-read')\n return k.generate_url(expires_in=0, query_auth=False, force_http=True)",
"def sync_s3():\n\n subprocess.run([\"aws\", \"s3\", \"sync\", \"./\", \"s3://{0}\".format(BLOG_BUCKET_NAME)])",
"def write_objects_to_s3(bucket, key, objects):\n # We use sort_keys=True to ensure deterministic results. The separators\n # flag allows us to write more compact JSON, which makes things faster!\n # See https://twitter.com/raymondh/status/842777864193769472\n json_str = b'\\n'.join([\n json.dumps(m, sort_keys=True, separators=(',', ':')).encode('ascii')\n for m in objects\n ])\n\n client = boto3.client('s3')\n client.put_object(Bucket=bucket, Key=key, Body=json_str)",
"def modify_tags(self,note_id,tags):\n\n self._find_note(note_id).tags = tags",
"def set_tag(self, tag, state = True):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.object.tag\", \r\n self._object._eco_id, tag, state)\r\n p2e._app.Exec(arg_str)",
"def save_to_s3(bucket_name, file_name, data):\n\n s3 = boto3.resource('s3')\n obj = s3.Object(bucket_name, file_name)\n resp = obj.put(Body=json.dumps(data))\n return resp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add the generator (G) losses to TensorBoard and store the values (loss_adv, loss_direct, loss_G) in the logger.
|
def tb_add_step_loss_g(self, writer, global_step):
step_loss_G = {
'loss_adv': self.loss_adv_G.item(),
'loss_direct': self.loss_direct_G.item(),
'loss_G': self.loss_G.item(),
}
writer.add_scalars("Train/Generator", step_loss_G, global_step=global_step)
self.Logger.append_G(step_loss_G)
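
A minimal sketch of logging grouped scalars with torch.utils.tensorboard, assuming plain float loss values rather than the model's tensors (function name is illustrative):

from torch.utils.tensorboard import SummaryWriter

def log_generator_losses(writer: SummaryWriter, step: int,
                         loss_adv: float, loss_direct: float, loss_g: float) -> None:
    """Write the three generator losses under one Train/Generator group."""
    writer.add_scalars(
        "Train/Generator",
        {"loss_adv": loss_adv, "loss_direct": loss_direct, "loss_G": loss_g},
        global_step=step,
    )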
|
[
"def add_loss(loss):\n tf.add_to_collection(LOSSES, loss)",
"def compute_G_loss(self):\n # netD(0) for the separation branch.\n pred_fake1 = self.netD(0, self.fake_A)\n pred_fake2 = self.netD(0, self.fake_B)\n pred_fake3 = self.netD(0, self.fake_C)\n pred_fake4 = self.netD(0, self.fake_D)\n pred_fake5 = self.netD(0, self.fake_E)\n\n self.loss_G_GAN = self.criterionGAN(pred_fake1, True) \\\n + self.criterionGAN(pred_fake2, True) * self.label[0] \\\n + self.criterionGAN(pred_fake3, True) * self.label[1] \\\n + self.criterionGAN(pred_fake4, True) * self.label[2] \\\n + self.criterionGAN(pred_fake5, True) * self.label[3]\n\n self.loss_Ln = self.criterionL1(self.real_A, self.fake_A) \\\n + self.criterionL2(self.real_B, self.fake_B) * self.label[0] \\\n + self.criterionL2(self.real_C, self.fake_C) * self.label[1] \\\n + self.criterionL1(self.real_D, self.fake_D) * self.label[2] \\\n + self.criterionL2(self.real_E, self.fake_E) * self.label[3]\n\n self.loss_VGG = self.criterionVGG(self.fake_A, self.real_A) \\\n + self.criterionVGG(self.fake_B, self.real_B) * self.label[0] \\\n + self.criterionVGG(self.fake_C, self.real_C) * self.label[1] \\\n + self.criterionVGG(self.fake_D, self.real_D) * self.label[2] \\\n + self.criterionVGG(self.fake_E, self.real_E) * self.label[3]\n\n self.loss_G = self.loss_G_GAN * self.opt.lambda_GAN + self.loss_Ln * self.opt.lambda_Ln + self.loss_VGG * self.opt.lambda_VGG\n\n return self.loss_G",
"def add_loss_summaries():\n losses = tf.get_collection(\"summary_loss\")\n \n loss_averages = tf.train.ExponentialMovingAverage(0.95, name='avg')\n loss_averages_op = loss_averages.apply(losses)\n\n for loss_op in losses:\n tf.scalar_summary(loss_op.op.name +' (raw)', loss_op)\n tf.scalar_summary(loss_op.op.name, loss_averages.average(loss_op))\n\n return loss_averages_op",
"def update_loss(self, loss):\n self.loss += loss\n self.num_loss_attempts += 1",
"def log_losses(self):\n for loss_name, running_loss in self.running_losses.items():\n self.tensorboard_writer.add_scalar('Loss/' + loss_name, running_loss / self.running_loss_step, self.step)\n self.init_losses()",
"def get_adv_loss(self, sess, history, labels, sentence, rewards, baseline):\n feed_dict = {self.enc_inp: history, self.labels: labels, self.sentence: sentence, self.rewards: rewards,\n self.baseline: baseline}\n outputs = sess.run([self.g_part0,self.g_part1,self.g_part2,self.g_part3,self.g_part4, self.g_loss], feed_dict)\n return outputs",
"def gan_loss(self,gen_images,loss):\n labels = Variable(torch.ones( [gen_images.size()[0], 1] ))\n \n return(loss(gen_images,labels))",
"def logger_iter_gan(self, epoch, d_loss, g_loss, real_score, Unrealscore):\n print(\"Epoch: %d, d_loss= %f, g_loss= %f, D(X)= %f, D(G(X))= %f\" % (\n epoch, d_loss.data.cpu().mean(), g_loss.data.cpu().mean(), real_score.data.cpu().mean(),\n Unrealscore.data.cpu().mean()))\n self.history_Disc.append(d_loss.data.cpu().mean())\n self.history_Gen.append(g_loss.data.cpu().mean())\n self.history_Discx.append(real_score.data.cpu().mean())\n self.history_DiscGx.append(Unrealscore.data.cpu().mean())",
"def _loss_function(self):\n\n def vae_loss(y_true, y_pred):\n #print(self.c_current)\n return K.mean(recon_loss(y_true, y_pred) + self.alpha *kl_loss(y_true, y_pred))\n\n def kl_loss(y_true, y_pred):\n return 0.5 * K.sum(K.exp(self.log_var) + K.square(self.mu) - 1. - self.log_var, axis=1)\n\n def kl_loss_monitor0(y_true, y_pred):\n klds = K.mean(K.exp(self.log_var) + K.square(self.mu) - 1. - self.log_var, axis=0)\n return klds[0]\n \n def kl_loss_monitor1(y_true, y_pred):\n klds = K.mean(K.exp(self.log_var) + K.square(self.mu) - 1. - self.log_var, axis=0)\n #K.print_tensor(klds)\n return klds[1]\n \n def kl_loss_monitor2(y_true, y_pred):\n klds = K.mean(K.exp(self.log_var) + K.square(self.mu) - 1. - self.log_var, axis=0)\n #K.print_tensor(klds)\n return klds[2]\n\n def kl_loss_monitor3(y_true, y_pred):\n klds = K.mean(K.exp(self.log_var) + K.square(self.mu) - 1. - self.log_var, axis=0)\n #K.print_tensor(klds)\n return klds[3]\n\n def kl_loss_monitor4(y_true, y_pred):\n klds = K.mean(K.exp(self.log_var) + K.square(self.mu) - 1. - self.log_var, axis=0)\n #K.print_tensor(klds)\n return klds[4]\n\n def recon_loss(y_true, y_pred):\n return 0.5 * K.sum(K.square((y_true - y_pred)), axis=1)\n\n self.vae_optimizer = keras.optimizers.Adam(lr=self.learning_rate)\n self.vae_model.compile(optimizer=self.vae_optimizer, loss=vae_loss, \n metrics=[kl_loss ,recon_loss,kl_loss_monitor0,kl_loss_monitor1,kl_loss_monitor2,kl_loss_monitor3,\n kl_loss_monitor4])",
"def lp_loss(generated, gt, l_num, batch_size_tf):\n lp_loss=tf.reduce_sum(tf.abs(generated - gt)**l_num)/(2*tf.cast(batch_size_tf,tf.float32))\n tf.add_to_collection('losses', lp_loss)\n\n loss = tf.add_n(tf.get_collection('losses'), name='total_loss')\n return loss",
"def build_loss(saved_for_loss, *gts):\n raise NotImplementedError",
"def loss_style(self, output, vgg_gt):\r\n loss = 0\r\n for o, g in zip(output, vgg_gt):\r\n loss += self.l1(self.gram_matrix(o), self.gram_matrix(g))\r\n return loss",
"def logger_iter_wgan(self, epoch, gen_iteration, d_loss, g_loss, real_loss, Unrealloss):\n print(\"Epoch: %d, Gen_iteration: %d, d_loss= %f, g_loss= %f, real_loss= %f, Unrealloss = %f\" %\n (epoch, gen_iteration, d_loss.data.cpu().mean(), g_loss.data.cpu().mean(), real_loss, Unrealloss))\n self.history_Disc.append(d_loss.data.cpu().mean())\n self.history_Gen.append(g_loss.data.cpu().mean())",
"def loss(self, value: float) -> None:\n self.scores['loss'].append(value)",
"def add_loss(self, avg_batch_loss: float, num_instances: int) -> None:\n self.losses.append(avg_batch_loss * num_instances)\n self.batch_sizes.append(num_instances)",
"def get_total_loss(splits=[''], collection='outputs', with_summaries=True, verbose=0): \n losses = []\n for split in splits:\n full_loss = 0.\n loss_collections = [x for x in tf.get_default_graph().get_all_collection_keys() if \n x.endswith('_loss') and x.startswith(split)]\n ## sum losses\n for key in loss_collections:\n collected = tf.get_collection(key)\n loss = tf.add_n(collected) / float(len(collected))\n full_loss += loss\n if with_summaries:\n base_name = key.split('_', 1)[0]\n tf.summary.scalar(key, loss, collections=[collection], family='train_%s' % base_name)\n\n ## Add regularization loss if any \n reg_losses = tf.losses.get_regularization_losses(scope='train/dev0/%s' % split)\n if len(reg_losses):\n regularization_loss = tf.add_n(reg_losses)\n full_loss += regularization_loss\n if with_summaries:\n tf.summary.scalar('%sregularization_loss' % split, regularization_loss, collections=[collection])\n\n ## Summary for the total loss in the current scope\n if with_summaries:\n tf.summary.scalar('%stotal_loss' % split, full_loss, collections=[collection]) \n \n ## Add losses and corresponding variables\n train_vars = tf.trainable_variables(scope=split)\n losses.append((full_loss, train_vars, split))\n if verbose == 2:\n print(' > \\033[33min %s scope:\\033[0m' % (split if split else \"global\"))\n print(' ', len(reg_losses), 'regularization losses found')\n if len(loss_collections):\n print('\\n'.join([\" *%s*: %s tensors\" % (x, len(tf.get_collection(x)))\n for x in loss_collections]))\n else:\n print(' \\033[31mWarning:\\033[0m No losses found with base name', split)\n print(' Trainable variables: [%s]' % ', '.join(list(map(lambda x: x.name, train_vars))))\n return losses",
"def make_log_hooks(global_step, loss):\n hooks = []\n\n def summ_formatter(d):\n return \"Step {step}, loss: {loss:.5f}\".format(**d)\n\n loss_hook = tf.train.LoggingTensorHook({\n \"step\": global_step,\n \"loss\": loss\n },\n every_n_iter=FLAGS.summarize_every,\n formatter=summ_formatter)\n hooks.append(loss_hook)\n if tf.get_collection(\"infrequent_summaries\"):\n infrequent_summary_hook = tf.train.SummarySaverHook(\n save_steps=1000,\n output_dir=os.path.join(FLAGS.logdir, exp_name()),\n summary_op=tf.summary.merge_all(key=\"infrequent_summaries\"))\n hooks.append(infrequent_summary_hook)\n return hooks",
"def add_wd(op, wd):\n params = get_params(op)\n for param in params:\n weight_decay = tf.multiply(tf.nn.l2_loss(param), wd)\n tf.add_to_collection(tf.GraphKeys.LOSSES, weight_decay)\n return op",
"def run_with_custom_entropy_loss():\n\n def entropy_policy_gradient_loss(policy, model, dist_class, train_batch):\n logits, _ = model.from_batch(train_batch)\n action_dist = dist_class(logits, model)\n return (-0.1 * action_dist.entropy() - tf.reduce_mean(\n action_dist.logp(train_batch[\"actions\"]) *\n train_batch[\"advantages\"]))\n\n EntropyPolicy = PGTFPolicy.with_updates(\n loss_fn=entropy_policy_gradient_loss)\n EntropyLossPG = PGTrainer.with_updates(\n name=\"EntropyPG\", get_policy_class=lambda _: EntropyPolicy)\n run_heuristic_vs_learned(use_lstm=True, trainer=EntropyLossPG)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove handlers from all loggers
|
def clear_loggers():
    import logging
    loggers = [logging.getLogger()] + list(logging.Logger.manager.loggerDict.values())
    for logger in loggers:
        # copy the handler list: removeHandler() mutates logger.handlers while we iterate
        handlers = list(getattr(logger, "handlers", []))
        for handler in handlers:
            logger.removeHandler(handler)
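
A minimal usage sketch (the reconfiguration below is an assumption for illustration, not part of the original): clear every handler first, then attach a fresh one so log lines are not duplicated.

import logging

clear_loggers()                          # drop every existing handler
logging.basicConfig(level=logging.INFO)  # basicConfig re-attaches one StreamHandler to the root logger
logging.getLogger(__name__).info("logging reconfigured")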
|
[
"def remove_handlers_root_logger_object():\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)",
"def remove_handlers(self):\n\n self.log.removeHandler(self.fileHandler)\n if self.streamHandler:\n self.log.removeHandler(self.streamHandler)",
"def delete_logging_handlers(logger: logging.Logger):\n if len(logger.handlers) > 1:\n logger.handlers = [\n h for h in logger.handlers if type(h) == logging.StreamHandler\n ]\n assert len(logger.handlers) == 1, \"Multiple logging StreamHandlers present!!\"",
"def remove_all_subscriber():\n LogManager._LogManager__implementation.remove_all_subscriber()",
"def __del__(self):\n handlers = copy.copy(self.logger.handlers)\n for handler in handlers:\n handler.close()\n self.logger.removeHandler(handler)\n self.logger = None\n self.c_simulation = None",
"def removeHandler(self):\n mainLogger = logging.getLogger()\n if self.handler is not None:\n mainLogger.removeHandler(self.handler)\n self.handler = None",
"def _removeHandler(logger, handler=None):\n if handler is None:\n for logHandler in logger.handlers:\n logger.removeHandler(logHandler)\n else:\n logger.removeHandler(handler)",
"def _shutdown_logger(logger):\n\n for handler in logger.handlers:\n handler.flush()\n handler.close()",
"def end_logging():\n logger = logging.getLogger(\"TopLog\")\n logging.captureWarnings(False)\n all_handlers = [h for h in logger.handlers]\n for h in all_handlers:\n logger.removeHandler(h)",
"def syslog_off(self):\n self.logger.removeHandler(self.syslog_handler)",
"def decorate_handlers(logger=None):\n if logger is None:\n logger = logging.getLogger()\n handlers = list(logger.handlers)\n for h in handlers:\n logger.removeHandler(h)\n logger.addHandler(HandlerDecorator(h))",
"def clear_logging() -> None:\n logger = logging.getLogger('mltk')\n logger.propagate = True\n logger.setLevel(logging.NOTSET)\n logger.handlers.clear()",
"def remove_log_filter(target_logger, log_filter):\n for handler in target_logger.handlers:\n handler.removeFilter(log_filter)",
"def clear_handlers():\n global _handlers\n _handlers = []",
"def remove_file_logger():\n h = get_current_logfile_handler()\n if h is not None:\n h.close()\n nox_logger.removeHandler(h)",
"def flush(self) -> None:\n for handler in self.logger.handlers:\n handler.flush()",
"def unset_logger():\n raise NotImplementedError('Unset logger function is not implemented yet.')",
"def clear_event_handlers() -> None:\n _subscriptions.clear()",
"def removeLogFile(self, logFile):\n global _logFileHandlers\n logFilePath = os.path.abspath(logFile)\n if logFilePath.lower() in _logFileHandlers:\n handler = _logFileHandlers.pop(logFilePath.lower())\n logging.getLogger().removeHandler(handler)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function uses the SARIMAX model from statsmodels. It predicts 'y' using 'x' as exogenous variables. For each exogenous variable, the function takes one lag at a distance of 'h' time units. It forecasts 'h_max' time steps into the future. Arguments
|
def forecast_SARIMAX(window: int, n_train: int, p: int, d: int, q: int, ps: int, ds: int, qs: int, m: int,
x: list[str], y: str, h_max: int, transf: Callable[[float], float], itransf: Callable[[float], float] )-> pd.DataFrame:
df_US = pd.read_csv('OWID_weekly.csv')
df_US.index = pd.to_datetime(df_US['date'])
date = df_US['date']
date_list = date.tolist()
df_US = df_US.drop(columns=['date'])
#Apply transformation
df_US_transf = pd.DataFrame()
for col in df_US.columns:
        df_US_transf[col] = df_US[col].apply(transf)
#List of exogenous variables
total_features = ['icu_patients', 'hosp_patients','positive_rate','new_cases','new_tests' , 'people_vaccinated', 'people_fully_vaccinated']
df_y = df_US_transf[y]
df_x = df_US_transf[total_features]
value = np.empty(h_max)
index = list()
    #Predicting weeks into the future according to the horizon (h)
for h in range(1,h_max+1):
#Shifting exogenous variables according to the horizon (h)
x_lagged_variables = df_x.shift(h).bfill()
exog_variables = x_lagged_variables[x]
        # It's a moving window that starts at the window offset and runs to the train size plus 'h' time steps into the future
df_window = df_y[int(window-1):int(window-1+n_train+h)]
i = h-1
#Defining train and test data
col_train = df_y.loc[df_window.index][0:n_train]
exo_train = exog_variables.loc[df_window.index][0:n_train]
exo_test = exog_variables.loc[df_window.index][n_train:]
#Model definition for prediction
model = SARIMAX(col_train, exog=exo_train, order=(p,d,q), seasonal_order=(ps,ds,qs,m), enforce_stationarity=False, enforce_invertibility=False)
model_fit = model.fit(disp=False)
forecast = model_fit.predict(len(col_train), len(col_train)+i, exog=exo_test)
#Inverse transformation
forecast = itransf(forecast)
forecast_f = forecast.to_frame()
#For each horizon, the function selectively uses values that have a lag/shift equal to the specific horizon
if h == 1:
df1 = forecast_f.rename(columns= {0: y})
value[i] = df1[y].iloc[i]
index.append(date_list[n_train+window-1])
else:
df2 = forecast_f.rename(columns= {'predicted_mean': y})
value[i] = df2[y].iloc[i]
index.append(date_list[n_train+window-2+h])
data = {'index': index, y: value}
forecast_f = pd.DataFrame(data)
forecast_f['index'] = pd.to_datetime(forecast_f['index'])
# Set the 'index' column as the DataFrame's index
forecast_f.set_index('index', inplace=True)
return forecast_f
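
An illustrative call, assuming 'OWID_weekly.csv' is present as the function expects; every hyperparameter value below is an assumption chosen only to show the calling convention, not a recommended specification.

import numpy as np

# Hypothetical settings: log1p/expm1 as the transform pair, a SARIMAX(1,1,1)x(1,0,1,52)
# specification, two exogenous columns, and a four-week horizon.
forecast_df = forecast_SARIMAX(
    window=1, n_train=52,
    p=1, d=1, q=1, ps=1, ds=0, qs=1, m=52,
    x=['icu_patients', 'positive_rate'], y='new_cases',
    h_max=4, transf=np.log1p, itransf=np.expm1,
)
print(forecast_df)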
|
[
"def esm_arima(ts):\n\n test_n = 60\n ses = []\n trend = []\n dtrend = []\n arima = []\n j=0\n \n for i in range(test_n,0,-1): #(60,59,58...3,2,1)\n # moving window, walk foward 1 step \n train = np.asarray(ts[j:len(ts)-i])\n j= j+1\n \n # 3 different types of ESM models. Each 1 makes 1 step ahead predictions\n ses.append(SimpleExpSmoothing(train).fit(optimized = True).\\\n forecast(1)[0])\n \n trend.append(ExponentialSmoothing(train, \n trend='add',\n damped=False,\n seasonal='None').fit(optimized = True).\\\n forecast(1)[0])\n \n dtrend.append(ExponentialSmoothing(train, \n trend='add',\n damped=True,\n seasonal='None').fit(optimized = True).\\\n forecast(1)[0])\n \n # Auto arima model makes 1 step ahead prediction.\n model = auto_arima(train, trace=False, error_action='ignore', \n suppress_warnings=True, max_p=15, max_q=15,\n d=0, D=0, max_order=20, seasonal = False)\n model.fit(train)\n forecast = model.predict(n_periods=1)\n \n arima.append(forecast)\n \n print('done with step: ', j)\n \n test = ts.tail(test_n) # test set\n \n # naive forecast predicts no change in price aka return = 0\n naive_mae = mean_absolute_error([0] * test_n, test)\n \n # calculate MAE for all 4 model types\n ses_mae = mean_absolute_error(ses, test)\n trend_mae = mean_absolute_error(trend, test)\n dtrend_mae = mean_absolute_error(dtrend, test)\n arima_mae = mean_absolute_error(arima, test)\n \n # calculate MASE for all 4 model types\n ses_mase = ses_mae / naive_mae\n trend_mase = trend_mae / naive_mae\n dtrend_mase = dtrend_mae / naive_mae\n arima_mase = arima_mae / naive_mae\n \n # create list of all metrics\n metrics = [naive_mae, ses_mae, trend_mae, dtrend_mae, arima_mae,\n ses_mase, trend_mase, dtrend_mase, arima_mase]\n \n return(metrics)",
"def build_model(series, p, d, q, S, exog_data, P=None, D=None, Q=None):\n if P is None:\n P = p\n if D is None:\n D = d\n if Q is None:\n Q = q\n model = SARIMAX(series, order=(p,d,q),\n seasonal_order=(P,D,Q,S),\n exog=exog_data,\n enforce_invertibility=True)\n results = model.fit()\n return results",
"def iterative_forecast(\n predictor: Predictor,\n train: pd.DataFrame,\n lags: int,\n exog: pd.DataFrame = None,\n periods: int = 1,\n against_baseline = False\n ):\n\n add_exog = exog is not None\n \n variables = train.columns.tolist()\n \n # Define first period to forecast\n last_day = train.index[-1]\n append_period = last_day - relativedelta(months=lags - 1)\n\n endog_X = train[append_period:last_day].to_numpy()\n\n if add_exog:\n exog_X = exog[append_period:last_day].to_numpy()\n iter_day = last_day\n \n y_hat = []\n \n for _ in range(periods):\n\n if add_exog:\n X = fit_into_row(endog_X, exog_X)\n\n # Update the exogenous variable\n exog_X[:-1] = exog_X[1:]\n iter_day = last_day + relativedelta(months=1)\n exog_X[-1] = exog.loc[iter_day]\n\n else:\n X = endog_X.reshape(-1, 1)\n\n forecast = predictor(X)\n \n endog_X[:-1] = endog_X[1:]\n endog_X[-1] = forecast[\"mean\"]\n \n y_hat.append(forecast)\n\n \n end_date = last_day + relativedelta(months=periods - 1) \n index = pd.date_range(start=last_day, end=end_date, freq=\"MS\")\n compact_forecast = pd.DataFrame(y_hat, index = index)\n \n df = pd.DataFrame(index = index)\n \n # Expand columns\n for column in compact_forecast.columns:\n \n expanded_column = compact_forecast[column].apply(pd.Series)\n expanded_column = expanded_column.rename(columns = lambda i: f\"{variables[i]}_{column}\")\n \n df = pd.concat([df[:], expanded_column[:]], axis=1)\n\n if against_baseline:\n baseline_model = AR1(train)\n baseline_forecast = baseline_model(periods - 1)\n\n df = pd.concat((df, baseline_forecast), axis = 1)\n\n \n return df",
"def single_forecast(model, x):\n # makes a prediction (in log space)\n prediction = (model.intercept_ + model.coef_ * x)\n # prints a prediction (in arithmetic space)\n print('forecast value', np.exp(prediction).round(2))\n # returns prediction (in log space)\n return prediction\n\n # %%\n # Input start and end dates",
"def get_predictions(endogenous, exogenous, max_value_functions, not_interpolated):\n # Define ordinary least squares model and fit to the data.\n beta = ols(endogenous, exogenous[not_interpolated])\n\n # Use the model to predict EMAX for all states. As in Keane & Wolpin (1994),\n # negative predictions are truncated to zero.\n endogenous_predicted = exogenous.dot(beta)\n endogenous_predicted = clip(endogenous_predicted, 0)\n\n # Construct predicted EMAX for all states and the\n predictions = endogenous_predicted + max_value_functions\n predictions[not_interpolated] = endogenous + max_value_functions[not_interpolated]\n\n if not np.all(np.isfinite(beta)):\n warnings.warn(\"OLS coefficients in the interpolation are not finite.\")\n\n return predictions",
"def arima_train(df, feature):\n order, seasonal_order = arima_parameters(df, feature)\n mod = sm.tsa.statespace.SARIMAX(df[feature].resample('MS').mean(),\n order=order,\n seasonal_order=seasonal_order,\n enforce_stationarity=False,\n enforce_invertibility=False)\n results = mod.fit()\n return results",
"def recursive(self, y, model):\n \n # get the dates to forecast\n last_date = y.index[-1] + pd.Timedelta(hours=1)\n fcast_range = pd.date_range(last_date, periods=self.n_steps, freq=self.step)\n\n fcasted_values = []\n target = y.copy()\n\n for date in fcast_range:\n\n new_point = fcasted_values[-1] if len(fcasted_values) > 0 else 0.0 \n target = target.append(pd.Series(index=[date], data=new_point))\n\n # forecast\n ts_features = create_ts_features(target)\n if len(self.lags) > 0:\n lags_features = create_lag_features(target, lags=self.lags)\n features = ts_features.join(lags_features, how=\"outer\").dropna()\n else:\n features = ts_features\n \n predictions = model.predict(features)\n fcasted_values.append(predictions[-1])\n\n return pd.Series(index=fcast_range, data=fcasted_values)",
"def test_create_train_X_y_output_when_y_is_series_10_and_exog_is_dataframe_of_float_int_category_steps_3():\n series = pd.DataFrame({'l1': pd.Series(np.arange(10), dtype=float), \n 'l2': pd.Series(np.arange(50, 60), dtype=float)})\n exog = pd.DataFrame({'exog_1': pd.Series(np.arange(100, 110), dtype=float),\n 'exog_2': pd.Series(np.arange(1000, 1010), dtype=int),\n 'exog_3': pd.Categorical(range(100, 110))})\n \n forecaster = ForecasterAutoregMultiVariate(LinearRegression(), level='l1',\n lags=5, steps=3)\n results = forecaster.create_train_X_y(series=series, exog=exog) \n expected = (\n pd.DataFrame(\n data = np.array([[4., 3., 2., 1., 0., 54., 53., 52., 51., 50., 105, 1005, 105, 106, 1006, 106, 107, 1007, 107],\n [5., 4., 3., 2., 1., 55., 54., 53., 52., 51., 106, 1006, 106, 107, 1007, 107, 108, 1008, 108],\n [6., 5., 4., 3., 2., 56., 55., 54., 53., 52., 107, 1007, 107, 108, 1008, 108, 109, 1009, 109]],\n dtype=float),\n index = pd.RangeIndex(start=7, stop=10, step=1),\n columns = ['l1_lag_1', 'l1_lag_2', 'l1_lag_3', 'l1_lag_4', 'l1_lag_5', \n 'l2_lag_1', 'l2_lag_2', 'l2_lag_3', 'l2_lag_4', 'l2_lag_5',\n 'exog_1_step_1', 'exog_2_step_1', 'exog_3_step_1', \n 'exog_1_step_2', 'exog_2_step_2', 'exog_3_step_2', \n 'exog_1_step_3', 'exog_2_step_3', 'exog_3_step_3']\n ).astype({'exog_1_step_1': float, 'exog_2_step_1': int,\n 'exog_1_step_2': float, 'exog_2_step_2': int,\n 'exog_1_step_3': float, 'exog_2_step_3': int}\n ).assign(exog_3_step_1=pd.Categorical(range(105, 108), categories=range(100, 110)),\n exog_3_step_2=pd.Categorical(range(106, 109), categories=range(100, 110)),\n exog_3_step_3=pd.Categorical(range(107, 110), categories=range(100, 110))\n ),\n {1: pd.Series(\n data = np.array([5., 6., 7.], dtype=float), \n index = pd.RangeIndex(start=5, stop=8, step=1),\n name = \"l1_step_1\"\n ),\n 2: pd.Series(\n data = np.array([6., 7., 8.], dtype=float), \n index = pd.RangeIndex(start=6, stop=9, step=1),\n name = \"l1_step_2\"\n ),\n 3: pd.Series(\n data = np.array([7., 8., 9.], dtype=float), \n index = pd.RangeIndex(start=7, stop=10, step=1),\n name = \"l1_step_3\"\n )\n }\n )\n\n pd.testing.assert_frame_equal(results[0], expected[0])\n assert isinstance(results[1], dict)\n assert all(isinstance(x, pd.Series) for x in results[1].values())\n assert results[1].keys() == expected[1].keys()\n for key in expected[1]: \n pd.testing.assert_series_equal(results[1][key], expected[1][key])",
"def test_create_train_X_y_output_when_y_is_series_10_and_exog_is_dataframe_of_float_int_category_steps_1():\n series = pd.DataFrame({'l1': pd.Series(np.arange(10), dtype=float), \n 'l2': pd.Series(np.arange(50, 60), dtype=float)})\n exog = pd.DataFrame({'exog_1': pd.Series(np.arange(100, 110), dtype=float),\n 'exog_2': pd.Series(np.arange(1000, 1010), dtype=int),\n 'exog_3': pd.Categorical(range(100, 110))})\n \n forecaster = ForecasterAutoregMultiVariate(LinearRegression(), level='l1',\n lags=5, steps=1)\n results = forecaster.create_train_X_y(series=series, exog=exog) \n expected = (\n pd.DataFrame(\n data = np.array([[4., 3., 2., 1., 0., 54., 53., 52., 51., 50., 105., 1005.],\n [5., 4., 3., 2., 1., 55., 54., 53., 52., 51., 106., 1006.],\n [6., 5., 4., 3., 2., 56., 55., 54., 53., 52., 107., 1007.],\n [7., 6., 5., 4., 3., 57., 56., 55., 54., 53., 108., 1008.],\n [8., 7., 6., 5., 4., 58., 57., 56., 55., 54., 109., 1009.]], \n dtype=float),\n index = pd.RangeIndex(start=5, stop=10, step=1),\n columns = ['l1_lag_1', 'l1_lag_2', 'l1_lag_3', 'l1_lag_4', 'l1_lag_5', \n 'l2_lag_1', 'l2_lag_2', 'l2_lag_3', 'l2_lag_4', 'l2_lag_5',\n 'exog_1_step_1', 'exog_2_step_1']\n ).astype({'exog_1_step_1': float, \n 'exog_2_step_1': int}).assign(exog_3_step_1=pd.Categorical(range(105, 110), categories=range(100, 110))\n ),\n {1: pd.Series(\n data = np.array([5., 6., 7., 8., 9.], dtype=float), \n index = pd.RangeIndex(start=5, stop=10, step=1),\n name = \"l1_step_1\"\n )\n }\n )\n\n pd.testing.assert_frame_equal(results[0], expected[0])\n assert isinstance(results[1], dict)\n assert all(isinstance(x, pd.Series) for x in results[1].values())\n assert results[1].keys() == expected[1].keys()\n for key in expected[1]: \n pd.testing.assert_series_equal(results[1][key], expected[1][key])",
"def synthetic_with_exogenous_ts():\n task = Task(TaskTypesEnum.ts_forecasting,\n TsForecastingParams(forecast_length=forecast_length))\n\n # Time series with exogenous variable\n ts_train = np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130])\n ts_exog = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])\n\n ts_test = np.array([140, 150, 160, 170])\n ts_test_exog = np.array([24, 25, 26, 27])\n\n # Indices for forecast\n start_forecast = len(ts_train)\n end_forecast = start_forecast + forecast_length\n\n # Input for source time series\n train_source_ts = InputData(idx=np.arange(0, len(ts_train)),\n features=ts_train, target=ts_train,\n task=task, data_type=DataTypesEnum.ts)\n predict_source_ts = InputData(idx=np.arange(start_forecast, end_forecast),\n features=ts_train, target=None,\n task=task, data_type=DataTypesEnum.ts)\n\n # Input for exogenous variable\n train_exog_ts = InputData(idx=np.arange(0, len(ts_train)),\n features=ts_exog, target=ts_train,\n task=task, data_type=DataTypesEnum.ts)\n predict_exog_ts = InputData(idx=np.arange(start_forecast, end_forecast),\n features=ts_test_exog, target=None,\n task=task, data_type=DataTypesEnum.ts)\n return train_source_ts, predict_source_ts, train_exog_ts, predict_exog_ts, ts_test",
"def _point_deseasonalized_forecast(self, query, t):\n\t\tmodel = RandomForestRegressor(n_estimators = 128, #100\n\t\t\t\t\t\t\t\t\t max_features = 'sqrt', #17\n\t\t\t\t\t\t\t\t\t bootstrap = True,\n\t\t\t\t\t\t\t\t\t max_samples = None, #low bootstrap size 0.3\n\t\t\t\t\t\t\t\t\t max_depth =None, #5\n\t\t\t\t\t\t\t\t\t min_samples_split = 2,\n\t\t\t\t\t\t\t\t\t n_jobs = 4,\n\t\t\t\t\t\t\t\t\t ccp_alpha = 0.0, #no cost-complexity pruning\n\t\t\t\t\t\t\t\t\t verbose = 0)\n\t\tX, y = self._learning_set(query, t) #(local) learning set\n\t\tmodel.fit(X,y) #Fit local model\n\t\tx_query = query._predictor.reshape(1,-1) #Predictor\n\t\tyhat = model.predict(x_query) \n\t\treturn yhat",
"def harmonicModelAnal(x, fs, window, fft_size, hop_size, min_fft_val, nSines, minf0, maxf0, f0et, harmDevSlope=0.01, minSineDur=.02):\n\n\tif (minSineDur <0): # raise exception if minSineDur is smaller than 0\n\t\traise ValueError(\"Minimum duration of sine tracks smaller than 0\")\n\t\t\n\t#hN = fft_size / 2 # size of positive spectrum\n\thM1 = int(math.floor((window.size + 1) / 2)) # half analysis window size by rounding\n\thM2 = int(math.floor(window.size / 2)) # half analysis window size by floor\n\tx = np.append(np.zeros(hM2), x) # add zeros at beginning to center first window at sample 0\n\tx = np.append(x, np.zeros(hM2)) # add zeros at the end to analyze last sample\n\tpin = hM1 # init sound pointer in middle of anal window \n\tpend = x.size - hM1 # last sample to start a frame\n\t#fftbuffer = np.zeros(fft_size) # initialize buffer for FFT\n\twindow = window / sum(window) # normalize analysis window\n\thfreqp = [] # initialize harmonic frequencies of previous frame\n\tf0t = 0 # initialize f0 track\n\tf0stable = 0 # initialize f0 stable\n\n\twhile pin<=pend:\n\t\t#print(\"pin:\", pin, \" pend:\", pend)\n\t\tx1 = x[pin-hM1:pin+hM2] # select frame\n\t\t#--------- harmonic Analysis frame\n\t\t# mX, pX = DFT.dftAnal(x1, w, N) # compute dft \n\t\t# ploc = UF.peakDetection(mX, t) # detect peak locations \n\t\t# iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc) # refine peak values\n\t\t# ipfreq = fs * iploc/N # convert locations to Hz\n\t\t# f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable) # find f0\n\t\t# if ((f0stable==0)&(f0t>0)) \\\n\t\t# \t\tor ((f0stable>0)&(np.abs(f0stable-f0t)<f0stable/5.0)):\n\t\t# \tf0stable = f0t # consider a stable f0 if it is close to the previous one\n\t\t# else:\n\t\t# \tf0stable = 0\n\t\t# hfreq, hmag, hphase = harmonicDetection(ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs, harmDevSlope) # find harmonics\n\t\t#-----------\n\t\tuseTWM=0\n\t\tmX, f0stable, f0t, hfreq, hmag, hphase = harmonicModelAnalFrame (x1, window, fft_size, min_fft_val, fs, hfreqp, f0et, minf0, maxf0, nSines, f0stable, harmDevSlope, useTWM)\n\t\thfreqp = hfreq #hfreq(previous)\n\t\tif pin == hM1: # first frame\n\t\t\txhfreq = np.array([hfreq])\n\t\t\txhmag = np.array([hmag])\n\t\t\txhphase = np.array([hphase])\n\t\telse: # next frames\n\t\t\txhfreq = np.vstack((xhfreq,np.array([hfreq])))\n\t\t\txhmag = np.vstack((xhmag, np.array([hmag])))\n\t\t\txhphase = np.vstack((xhphase, np.array([hphase])))\n\t\tpin += hop_size # advance sound pointer\n\txhfreq = SM.cleaningSineTracks(xhfreq, round(fs * minSineDur / hop_size)) # delete tracks shorter than minSineDur\n\treturn xhfreq, xhmag, xhphase, f0stable",
"def SARIMAX_error(series, p=10, d=2, q=2):\n \n \n X = series\n \n # set trainset to include all but last 48 months (4 years) only training on data between 9-4 years ago\n train_size = int(len(X) - 48)\n train, test = X[-108:train_size], X[train_size:]\n\n model = SARIMAX(train, order=(p,d,q), freq='MS', initialization='approximate_diffuse')\n\n results = model.fit()\n\n # Predict 48 months from end of train set\n forecast = results.get_forecast(steps=48)\n pred_ci = forecast.conf_int(alpha=.05)\n\n predictions = forecast.predicted_mean\n\n rmse = RMSE(test, predictions)\n pct = error_as_pct(rmse, train[-1], test[-1])\n \n return pred_ci, rmse, pct, (train[-1], test[-1])",
"def autoregressive_analysis(data):\n # from statsmodels.graphics.tsaplots import plot_pacf\n from sklearn.metrics import mean_squared_error\n from statsmodels.tsa.arima_model import ARIMA\n\n appliance_series = pd.read_csv(data, header=0, index_col=0, parse_dates=True, usecols=[0, 1])\n # print(appliance_series.describe())\n plt.plot(appliance_series)\n plt.show()\n # plot_pacf(appliance_series[\"Appliances\"])\n train = appliance_series.loc['2016-01-01':'2016-03-31']\n test = appliance_series.loc['2016-04-01':]\n p, q = 1, 8\n arma_model = ARIMA(train, order=(p, 0, q))\n arma_model_fit = arma_model.fit()\n pred = arma_model_fit.predict(start='2016-04-01 00:00:00', end='2016-05-27 18:00:00')\n print('MSE:', mean_squared_error(test, pred))\n plt.plot(test)\n plt.plot(pred)\n plt.show()",
"def fit_predict(self, test_time_range):\n\t\t#Date boundaries for test\n\t\tfirst_date = test_time_range[0].date()\n\t\tlast_date = test_time_range[-1].date()\n\t\tn_days = (last_date - first_date).days + 1 #retrive days attribute from timedelta\n\t\t#Empty arrays to store forecast\n\t\tforecast_length = n_days * self._cycle_length #Length of forecast vectors\n\t\tdaily_seasonal_forecast = empty(forecast_length)\n\t\tweekly_seasonal_forecast = empty(forecast_length)\n\t\tdeseasonalized_forecast = empty(forecast_length)\n\t\t#Align linked list with beginning of test\n\t\ttrav = self._patterns._tail \n\t\twhile trav._date != first_date:\n\t\t\ttrav = trav._prev \n\t\t#Forecast routine\n\t\tk = 0 #Forecast day counter\n\t\twhile trav and trav._date != last_date + self._cycle_length * Timedelta(1, 'H'): #Traverse patterns day by day\n\t\t\tprint(\"Forecasting day \", k+1, \" of \", n_days) #Progress\n\t\t\t#deseasonalized component forecast\n\t\t\tdeseasonalized_comp = empty(self._cycle_length) #Empty vector to store values\n\t\t\tfor t in range(self._cycle_length):\n\t\t\t\telement = self._point_deseasonalized_forecast(trav, t)\n\t\t\t\tdeseasonalized_forecast[k*self._cycle_length + t] = element\t\n\t\t\tdaily_seasonal_forecast[k * self._cycle_length: (k+1) * self._cycle_length] = self._season_forecast(trav, 24) #First seasonal\tcomponent\n\t\t\tweekly_seasonal_forecast[k * self._cycle_length: (k+1) * self._cycle_length] = self._season_forecast(trav, 168) #Second seasonalcomponent\t\n\t\t\ttrav = trav._next #Move to next day\n\t\t\tk = k + 1 #Increase forecast day counter\n\t\t#Store predicitions in model - convert to pandas Series\n\t\tself._full_forecast = Series(deseasonalized_forecast + weekly_seasonal_forecast + daily_seasonal_forecast, index=test_time_range)\n\t\tself._deseasonalized_forecast = Series(deseasonalized_forecast, index=test_time_range)\n\t\tself._daily_seasonal_forecast = Series(daily_seasonal_forecast, index=test_time_range)\n\t\tself._weekly_seasonal_forecast = Series(weekly_seasonal_forecast, index=test_time_range)\n\n\t\treturn self._full_forecast",
"def predict(model, ts_test):\r\n n_periods = ts_test.shape[0]\r\n df_dates = model.make_future_dataframe(periods=n_periods, include_history=False)\r\n model_prediction = model.predict(df_dates)\r\n y_pred = model_prediction[['ds', 'yhat']]\r\n y_pred = y_pred.set_index('ds')\r\n y_pred['yhat'] = y_pred['yhat']\r\n return y_pred['yhat']",
"def ELRscript(model,mon,fday,fyr,day1,day2,nday,hdate_last,lit,liti,wk,nla1,sla1,wlo1,elo1,nla2,sla2,wlo2,elo2,fprefix,mpref,training_season,ntrain,rainfall_frequency,MOS):\n\n#%% model Hindcasts \n\tfh_xh = Dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc', mode='r')\n\tfh_yh = Dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc', mode='r')\n\n\tlons = fh_xh.variables['X'][:]\n\tlats = fh_xh.variables['Y'][:]\n\n\tx = fh_xh.variables['tp'][:]; x = np.squeeze(x)\n\ty = fh_yh.variables['tp'][:]\n\tndat1, nlat, nlon = np.shape(x)\n\tx1=x[:,1,1]\n\tI = np.where(x1>10000)\n\tbad_value_num=len(x1[I])\n\tndat=ndat1-bad_value_num\n\n#%% ELR: Train the models\n# Make a dictionary to contain the 'LogisticRegression' objects and terciles\n\telr_dict = {} # create an empty dictionary\n\telr_climo_dict = {} # create an empty dictionary for the climo forecast\n\n\tym = np.mean(y,axis=0)\n\tmsk = ma.getmask(ym)\n\tindex_land = np.empty((nlat,nlon),dtype=int)\n\txm0 = x\n\t#xm = xm0[0:int(ndat/2),:,:]\n\txm = xm0[0:lit,:,:]\n\n\tx0 = np.zeros(np.shape(xm)) # array of zeros to construct the climo forecast\n\tijland = -1\n\tfor j in range(nlat):\n\t# print(\"in j loop, j=\", j)\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\tindex_land[j,i] = ijland # index of land points\n\t\t\t\t#elr_dict[ijland] = elr_fit(xm[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\t#elr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\telr_dict[ijland] = elr_fit(xm[:,j,i], y[0:lit,j,i])\n\t\t\t\telr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:lit,j,i])\n\t\t\t# ijland is the dictionary key that can be used to assess the entries, like this\n\t\t\t# mymodel, mytercs = mydict[0]\n\t\t\t# mymodel.coef_\n\tnland = ijland+1\n\t#print('ELR training done with total landpoints = ',nland)\n\n\t#%% Make set of ELR in-sample hindcasts (no XV)\n\t#elr_hc = np.empty((ndat,nlat,nlon,3)); elr_hc.fill(np.nan)\n\t#elr_hc = np.empty((int(ndat/2),nlat,nlon)); elr_hc.fill(np.nan)\n\telr_hc = np.empty((lit,nlat,nlon)); elr_hc.fill(np.nan)\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\t#elr_hc[:,j,i,:] = elr_tercilesPredict(xm[:,j,i], terciles, elrmodel)\n\t\t\t\telr_hc[:,j,i] = elr_quantilePredict(xm[:,j,i], elrmodel)\n\n# ijland = index_land[lat1, lon1]\n# elrmodel, terciles = elr_dict[ijland]\n# elrmodel_climo, terciles = elr_climo_dict[ijland]\n# poe, q_fcst, q_clim, = elr_poe( xm[idat,lat1,lon1], elrmodel, elrmodel_climo )\n# plt.figure()\n\n\t#print('Set of ELR hindcasts made on a map of xy gridpoints')\n#---------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\t#T=int(ndat/2)\n\tT=lit\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(elr_hc, axis=1)\n\tvar[np.isnan(var)]=-999. 
#use CPT missing value\n\tdss=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tTarr=np.empty(ndat,dtype=int)\n\tfor it in range(ndat):\n\t\tTarr[it]=1901+it\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(y[0:lit,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tT1=lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\tndat_fc = ndat-lit\n\txf = x[lit:ndat,:,:]\n\tyf = y[lit:ndat,:,:]\n\n#%% Verification period\n########################################\n\n\telr_fc = np.empty((ndat_fc,nlat,nlon,3)); elr_fc.fill(np.nan)\n\trpss_ELR_fc = np.ma.array(np.empty((nlat,nlon)), mask=msk, fill_value=np.nan)\n\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\telr_fc[:,j,i,:] = elr_tercilesPredict(xf[:,j,i], terciles, elrmodel)\n\t#print('Set of ELR forcasts made on a map of xy gridpoints')\n\n#----------------------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_verification.txt'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tf.write(\"cpt:ncats=3\\n\")\n\tW=nlon\n\tH=nlat\n\tds=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\tT=ndat-lit\n\tTarr1=Tarr[lit:]\n\tXarr=lons\n\tYarr1=lats\n\tYarr=Yarr1[::-1] #Y should from N to S\n\tvari='tp'\n\tvar=np.flip(elr_fc, axis=1)*100\n\tvar[np.isnan(var)]=-1.0 #use CPT missing value\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:C=1, cpt:clim_prob=0.33333333333300003, cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=probability (%), cpt:missing=-1.0000000000000000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,0]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=2, 
cpt:clim_prob=0.33333333333400000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,1]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=3, cpt:clim_prob=0.33333333333299997\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,2]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_verification.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\t#var=np.flip(y[int(ndat/2):,:,:], axis=1)\n\tvar=np.flip(y[lit:,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\t#T1=int(ndat/2)\n\tT1=ndat-lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()",
"def bayesian_forecast(self, data, model_params, model_path=\"model\"):\n\n self.logger.info(\"Using bayesian model\")\n\n cases = self.prepare_data(data[[self.geoid_col, self.metric]])\n cases_data = cases[0]\n list_geo = cases[1]\n n_geo = cases[2]\n geo_first_dates = cases[4]\n\n n_days = model_params[\"forecast_days\"]\n\n # Construct new vector with forecast target\n time_index = np.arange(0, n_days, 1)\n time_index = np.repeat(time_index, n_geo)\n\n # Construct geo vector\n geo_index = np.arange(n_geo)\n geo_index = np.tile(geo_index, n_days)\n dummy_y = np.zeros(len(time_index))\n\n # Generate the inference model\n inference_model = self.bayesian_model(x=time_index, y=dummy_y, index=geo_index, n=n_geo, model_params=model_params)\n\n # Sampling from posterior\n self.logger.info(\"Inferencing...\")\n\n # load model\n model_path = os.path.join(model_path, f\"{self.cumulative_metric}_bayesian_forecast.trace\")\n\n with inference_model:\n trace = pm.load_trace(model_path)\n posterior = pm.sample_posterior_predictive(trace)\n\n # Calculate credible interval\n credible_interval = az.hdi(\n posterior[\"cases\"], hdi_prob=.95\n )\n\n # Calculate dates\n start = [geo_first_dates[x] for x in list_geo[geo_index].tolist()]\n offset = [pd.DateOffset(x) for x in time_index]\n dates = list(\n map(lambda x: (x[0] + x[1]).to_pydatetime(), zip(start, offset))\n )\n\n # Create result dataframe\n forecast = pd.DataFrame(\n {\n self.date_col: dates,\n self.geoid_col: list_geo[geo_index],\n f\"{self.cumulative_metric}_bayesian_forecast\": np.mean(posterior[\"cases\"], axis=0),\n f\"{self.cumulative_metric}_bayesian_credible_interval_low\": credible_interval[:, 0],\n f\"{self.cumulative_metric}_bayesian_credible_interval_high\": credible_interval[:, 1]\n },\n index=dates,\n ).rename_axis(\"index\")\n\n # Merge with ground truth\n forecast = pd.merge(\n forecast.rename_axis(\"index\").reset_index(),\n cases_data[[self.geoid_col, self.cumulative_metric]].rename_axis(\"index\").reset_index(),\n on=[\"index\", self.geoid_col],\n how=\"outer\"\n ).set_index(\"index\")\n\n return forecast",
"def forward(self, h_prev, x_t):\n # softmax(arr, axis=0)\n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n cat = np.concatenate((h_prev, x_t), axis=1)\n # print('meow', cat.shape)\n h_next = np.tanh(cat @ self.Wh + self.bh)\n y = self.softmax(h_next @ self.Wy + self.by)\n return h_next, y"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a compiler name ("markdown", "rest"), whether it's meant for a post or a page, the compilers mapping, and post_pages, return the first matching entry from post_pages.
|
def filter_post_pages(compiler, content_type, compilers, post_pages):
# First throw away all the post_pages with the wrong is_post
    is_post = content_type != 'page'
filtered = [entry for entry in post_pages if entry[3] == is_post]
# These are the extensions supported by the required format
extensions = compilers[compiler]
# Throw away the post_pages with the wrong extensions
filtered = [entry for entry in filtered if any([ext in entry[0] for ext in
extensions])]
if not filtered:
raise Exception("Can't find a way, using your configuration, to create "
"a {0} in format {1}. You may want to tweak "
"COMPILERS or {2}S in conf.py".format(
content_type, compiler, content_type.upper()))
return filtered[0]
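
A hedged usage sketch: the shapes of compilers and post_pages below mirror a Nikola-style configuration, but the concrete values are assumptions for illustration only.

compilers = {
    "markdown": ('.md', '.mdown'),
    "rest": ('.rst', '.txt'),
}
post_pages = [
    ("posts/*.rst", "posts", "post.tmpl", True),        # posts written in reST
    ("stories/*.md", "stories", "story.tmpl", False),   # pages written in Markdown
]

entry = filter_post_pages("markdown", "page", compilers, post_pages)
print(entry)  # ("stories/*.md", "stories", "story.tmpl", False)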
|
[
"def get_pages_content(self):\n\n #TODO other markup langage (piece of cake)\n for page in self.postlist:\n self.log(\"\\t\" + page['filename'])\n temp=self.env.from_string(page['raw_text'])\n page['pre_content']=temp.render(page=page,pagelist=self.pagelist,postlist=self.postlist,postlist_lan=self.postlist_lan,ext=self.ext,**page)\n if page['markup']=='markdown':\n page['content']=self.md.convert(page['pre_content'])\n\n\n for page in self.pagelist:\n self.log(\"\\t\" + page['filename'])\n temp=self.env.from_string(page['raw_text'])\n page['pre_content']=temp.render(page=page,pagelist=self.pagelist,postlist=self.postlist,postlist_lan=self.postlist_lan,ext=self.ext,**page)\n if page['markup']=='markdown':\n page['content']=self.md.convert(page['pre_content'])",
"def scan_posts(self):\n if not self._scanned:\n print \"Scanning posts \",\n targets = set([])\n for wildcard, destination, _, use_in_feeds in self.config['post_pages']:\n print \".\",\n for base_path in glob.glob(wildcard):\n post = Post(base_path, destination, use_in_feeds,\n self.config['TRANSLATIONS'],\n self.config['DEFAULT_LANG'],\n self.config['BLOG_URL'],\n self.get_compile_html(base_path),\n self.MESSAGES)\n for lang, langpath in self.config['TRANSLATIONS'].items():\n dest = (destination, langpath, post.pagenames[lang])\n if dest in targets:\n raise Exception(\n 'Duplicated output path %r in post %r' %\n (post.pagenames[lang], base_path))\n targets.add(dest)\n self.global_data[post.post_name] = post\n if post.use_in_feeds:\n self.posts_per_year[\n str(post.date.year)].append(post.post_name)\n for tag in post.tags:\n self.posts_per_tag[tag].append(post.post_name)\n else:\n self.pages.append(post)\n for name, post in self.global_data.items():\n self.timeline.append(post)\n self.timeline.sort(cmp=lambda a, b: cmp(a.date, b.date))\n self.timeline.reverse()\n post_timeline = [p for p in self.timeline if p.use_in_feeds]\n for i, p in enumerate(post_timeline[1:]):\n p.next_post = post_timeline[i]\n for i, p in enumerate(post_timeline[:-1]):\n p.prev_post = post_timeline[i + 1]\n self._scanned = True\n print \"done!\"",
"def get_program_and_subprogram_posts(self, request, page_type, content_model):\n context = super(page_type, self).get_context(request)\n\n search_subprogram = request.GET.get('subprogram_id', None)\n date = request.GET.get('date', None)\n\n filter_dict = {}\n # if program\n if self.depth == 4:\n program_title = self.get_ancestors()[2]\n program = Program.objects.get(title=program_title)\n\n filter_dict['parent_programs'] = program\n if search_subprogram:\n if is_int(search_subprogram):\n filter_dict['post_subprogram'] = int(search_subprogram)\n\n context['subprograms'] = program.get_children().type(Subprogram).live().order_by('title')\n # if subprogram\n else:\n subprogram_title = self.get_ancestors()[3]\n program = Subprogram.objects.get(title=subprogram_title)\n filter_dict['post_subprogram'] = program\n\n if date:\n if is_json(date):\n date_range = json.loads(date)\n if isinstance(date_range, dict):\n if 'start' in date_range and 'end' in date_range:\n filter_dict['date__range'] = (date_range['start'], date_range['end'])\n\n all_posts = content_model.objects.live().public().filter(**filter_dict)\n context['all_posts'] = paginate_results(request, all_posts.order_by(\"-date\"))\n context['query_url'] = generate_url(request)\n context['program'] = program\n\n return context",
"def normalise_post(post):\n blog = post.blog\n\n if any(word.lower() in post.body.lower() for word in FILTER_WORDS):\n return None\n \n \"\"\" \n --------------------- NO LONGER NEEDED ---------------------\n \n if (blog == 'Marco.org'):\n if ('coffee' in post.body):\n return None\n if post.title.startswith(u'→'):\n title = post.title[2:]\n body = remove_final_link(post.body)\n permalink = extract_last_link(post.body)\n return ExtendedPost(post.time, post.blog, title, post.author,\n post.link, body, permalink)\n\n elif (blog == 'Daring Fireball') and u'★' in post.body:\n body = remove_final_link(post.body)\n permalink = extract_last_link(post.body)\n return ExtendedPost(post.time, post.blog, post.title, post.author,\n post.link, body, permalink)\n\n elif (blog == 'Erica Sadun') and (post.author == 'erica'):\n return ExtendedPost(post.time, post.blog, post.title,\n None, post.link, post.body, None)\n \n --------------------- NO LONGER NEEDED ---------------------\n \"\"\"\n\n return ExtendedPost(*post, permalink=None)",
"def compile_code(request):\n language = request.GET.get('language')\n print(language)\n\n if(language == \"javascript\"):\n data = compile_javascript_code(request)\n elif(language == \"python\"):\n data = compile_python_code(request)\n \n return data",
"def get_post_front_matter():\n\n short_filenames = os.listdir('posts')\n short_filenames = [x for x in short_filenames if x not in ['__init__.py', '__pycache__']]\n filenames = [os.path.join('posts', x) for x in short_filenames]\n\n all_front_matter = {}\n\n for s, f in zip(short_filenames, filenames):\n\n fm_marker = '---' if f[-2:] == \"md\" else '\"\"\"'\n\n front_matter = {'filename': f}\n front_matter['short_filename'] = s[:-3]\n front_matter['fm_marker'] = fm_marker\n front_matter['file_type'] = f[-2:]\n\n count_bars = 0\n with open(f, 'r') as myfile:\n for line in myfile:\n line = line.rstrip('\\n')\n if line == fm_marker and count_bars == 1:\n break\n elif count_bars == 1:\n front_matter[line[:line.find(':')]] = line[line.find(':')+1:].lstrip()\n elif line == fm_marker:\n count_bars += 1\n front_matter['tags'] = front_matter['tags'].split(', ')\n\n all_front_matter[front_matter['title']] = front_matter\n\n # Add some useful dictionaries\n all_tags = {}\n date_title_to_title = {}\n title_to_date_title = {}\n for title, front_matter in all_front_matter.items():\n\n #all_tags\n for tag in front_matter['tags']:\n tag = tag.lower().capitalize()\n if tag not in all_tags.keys():\n all_tags[tag] = [title]\n else:\n all_tags[tag].append(title)\n\n #date_title\n date_title_to_title[front_matter['date'] + \" | \" + title] = title\n title_to_date_title[title] = front_matter['date'] + \" | \" + title\n\n all_tags['All'] = list(all_front_matter.keys())\n\n return all_front_matter, all_tags, date_title_to_title, title_to_date_title",
"def instantiate_post(valid_classes, post_string, mode='r'):\n if not valid_classes:\n raise NoSupportedSites()\n\n matches = re.search(POST_PATTERN, post_string)\n site_identifier, post_id = matches.groups()\n\n for cls in valid_classes:\n if site_identifier.lower() in {cls.short_code, cls.domain}:\n return cls(post_id, mode=mode)\n\n raise UnsupportedSite('No supported site found for identifier: ' + site_identifier)",
"def extract_compiler(pstree):\n result = \"unknown\"\n ignoreT = {\n 'pstree' : True,\n 'ld' : True,\n 'collect2' : True,\n }\n \n if (pstree == \"unknown\"):\n return result\n\n a = pstree.split(\"---\")\n n = len(a)\n\n for cmd in reversed(a):\n if (not (cmd in ignoreT)):\n result = cmd\n break\n\n return cmd",
"def run(*args):\n request_tokens = {'thought', 'comment', 'workload', 'feedback'}# Words indicative\n # of asking for feedback\n\n # Words added by inspection, e.g. people tends to refer to ppl taking that\n # class, hence not asking for feedback\n undesired_tokens = {'teacher', 'lecturer', 'prof', 'manual',\n 'switch', 'people', 'guys', 'grade', 'offer', 'regist'} \n # Set with ids of courses with 'book' in its name because if\n # 'return_courses' identified courses but the word 'book' is in the post\n # text, it most likely concerns asking for books\n BOOK_IDS = {course.id for course in \n Course.objects.filter(name__contains='book')}\n i = 0\n out = []\n # f = open('post_identification_test.txt', 'w')\n for post_inst in Post.objects.filter(text__contains='?'):\n\n textlower = post_inst.text.lower()\n # Check if undesired words are in there\n if any(word in textlower for word in undesired_tokens):\n continue\n \n # Preliminary condition of any request_token (see above)\n # must be satisfied\n if any(word in textlower for word in request_tokens):\n identified_courses = return_courses(textlower)\n if identified_courses == set():\n continue # No courses identified so go to next post\n \n \n # If no 'book' course is identifed, yet 'book' is in the post,\n # skip it because most likely asking for books\n if (BOOK_IDS.intersection(identified_courses) == set() and\n 'book' in textlower):\n continue\n post_inst.request=True\n i += 1\n for course_id in identified_courses:\n post_inst.courses.add(course_id)\n post_inst.save()\n out.append(post_inst)\n # f.write(''.join([\"Identified: \", ', '.join(identified_courses), ' in\\n',post_inst.text,'\\n--------------------------------------------------------\\n']).encode('utf-8'))\n else:\n continue # If none of keywords in post, likely not relevant\n print \"Found\", i, \"posts that ask for course reviews\"",
"def get_org_wide_posts(self, request, page_type, content_model):\n context = super(page_type, self).get_context(request)\n\n search_program = request.GET.get('program_id', None)\n date = request.GET.get('date', None)\n\n filter_dict = {}\n\n if search_program:\n if is_int(search_program):\n filter_dict['parent_programs'] = int(search_program)\n if date:\n if is_json(date):\n date_range = json.loads(date)\n filter_dict['date__range'] = (date_range['start'], date_range['end'])\n\n all_posts = content_model.objects.filter(**filter_dict)\n context['all_posts'] = paginate_results(request, all_posts.live().public().order_by(\"-date\"))\n context['programs'] = Program.objects.filter(Q(live=True), Q(show_in_menus=True)| Q(location=True)).order_by('title')\n context['query_url'] = generate_url(request)\n\n return context",
"def _get_compiler(self, mode=None):\r\n if self.language == self.resource.source_language:\r\n return DesktopSourceCompiler(self.resource)\r\n else:\r\n return DesktopTranslationCompiler(self.resource)",
"def _get_compiler():\n dist = Distribution({'script_name': os.path.basename(sys.argv[0]),\n 'script_args': sys.argv[1:],\n 'cmdclass': {'config_cc': config_cc}})\n dist.parse_config_files()\n dist.parse_command_line()\n\n cmd_opts = dist.command_options.get('build_ext')\n if cmd_opts is not None and 'compiler' in cmd_opts:\n compiler = cmd_opts['compiler'][1]\n else:\n compiler = None\n\n ccompiler = new_compiler(compiler=compiler)\n customize_compiler(ccompiler)\n\n return ccompiler",
"def process_entry(entry, blog, comp_field=False):\n # Get the date of the post. If it was published more than two days\n # ago, drop the entry. Now, the feed entries is being read, analyzed and processed before being listed in SUBSCRIPTIONS.\n try:\n when = entry['updated_parsed']\n except KeyError:\n when = entry['published_parsed']\n # 'when' is familiarized in Coordinated Universal Time (UTC). More information can be found on https://en.wikipedia.org/wiki/Coordinated_Universal_Time\n when = utc.localize(datetime.fromtimestamp(time.mktime(when)))\n\n \"\"\"\n Having the comp_field enabled (set to True), this would be able to get rid of outdated RSS feeds.\n \"\"\"\n if bool(comp_field)==True:\n if when < START:\n return\n\n title = entry['title']\n\n try:\n author = entry['author']\n except KeyError:\n author = ', '.join(a['name'] for a in entry.get('authors', []))\n link = entry['link']\n try:\n body = entry['content'][0]['value']\n except KeyError:\n body = entry['summary']\n\n postField = Post(when, blog, title, author, link, body)\n return normalise_post(postField)",
"def blog_page():\n try:\n return ContentPage.objects.get(slug=blog_settings.BLOG_SLUG)\n except ContentPage.DoesNotExist:\n return None",
"def check_compiler(self, compilers):\n found = []\n for compiler in compilers:\n is_host = compiler['role'].startswith(HOST_COMPILERS.keyword)\n is_mpi = compiler['role'].startswith(MPI_COMPILERS.keyword) and self['mpi']\n is_shmem = compiler['role'].startswith(SHMEM_COMPILERS.keyword) and self['shmem']\n is_cuda = compiler['role'].startswith(CUDA_COMPILERS.keyword) and self['cuda']\n is_caf = compiler['role'].startswith(CAF_COMPILERS.keyword) and self['caf']\n if is_host or is_mpi or is_shmem or is_cuda or is_caf:\n found.append(compiler)\n if not found:\n raise ConfigurationError(\"Application '%s' is not compatible with any of these compilers:\\n %s\" %\n (self['name'], '\\n '.join(compiler['path'] for compiler in compilers)))\n # If more than one compiler is compatible then choose the first one\n return found[0]",
"def classify(self, post, lang):\n return post.tags_for_language(lang)",
"def find_posts(classes, url):\n # global output #??\n response = \"\"\n num = 0\n print(\"Searching for related material\")\n\n # Check all recent posts to see if any class in classes is mentioned in the\n # title or body. If it is, check the comments to make sure this post isn't\n # the original post the bot will eventually comment on nor is it a post already\n # checked (need this ??? try removing ??)\n for archive in reddit.subreddit('cornell').new(limit=None): # change this to top?\n for class_ in classes:\n if class_ in archive.title + \" \" + archive.selftext:\n\n for comment in archive.comments:\n if class_ in comment.body and archive.url != url and archive.url not in response:\n response = response + \"[\" + archive.title + \"](\" + archive.url + \")\" + \"\\n\\n\"\n num = num + 1\n if num == 5:\n return response\n break\n return response",
"def entry(request, page_name):\n\n md_page = util.get_entry(page_name)\n if md_page == None:\n return render(request, \"encyclopedia/error.html\", {\n \"page_name\": page_name\n })\n else:\n html = markdown2.markdown(md_page) \n return render(request, \"encyclopedia/entry.html\", {\n \"page_name\": page_name,\n \"html\": html\n })",
"async def normalize_post(context, post):\n # ABW: at the moment it makes zero sense to have that API method since there is\n # no fat node that would be source of unnormalized posts\n return await get_post(context, post['author'], post['permlink'])\n\n # decorate\n #if core['community_id']:\n # sql = \"\"\"SELECT title FROM hive_communities WHERE id = :id\"\"\"\n # title = await db.query_one(sql, id=core['community_id'])\n\n # sql = \"\"\"SELECT role_id, title\n # FROM hive_roles\n # WHERE community_id = :cid\n # AND account_id = :aid\"\"\"\n # role = await db.query_row(sql, cid=core['community_id'], aid=author['id'])\n\n # ret['community_title'] = title\n # ret['author_role'] = ROLES[role[0] if role else 0]\n # ret['author_title'] = role[1] if role else ''\n\n #return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Removes the last character entered and returns it because we don't want to display it.
|
def remove_last(view):
position = view.sel()[0].begin()
region = Region(position, position - 1)
character = view.substr(region)
view.run_command("left_delete")
# undoing twice to remove the character and also retain the view's dirty state.
view.run_command("undo")
view.run_command("undo")
return character
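
A hypothetical wrapper (the command class and its name are assumptions, not part of the original) showing how remove_last might be invoked from a Sublime Text command:

import sublime
import sublime_plugin

class CaptureLastCharCommand(sublime_plugin.TextCommand):
    # Run right after a keystroke to capture the character that was just entered
    # without leaving it visible in the buffer.
    def run(self, edit):
        character = remove_last(self.view)
        sublime.status_message("captured: " + repr(character))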
|
[
"def promptBackspace(self):\n\n self.hdirty = True\n if self.position != 0:\n self.string = self.string[:self.position - 1] + \\\n self.string[self.position:]\n self.position -= 1\n if self.promptValidate():\n self.promptFromScratch()\n else:\n self.stdscr.delch(self.height - 1, len(self.prompt) +\n 1 + self.position - self.view)\n self.stdscr.move(self.height - 1, len(self.prompt) +\n 1 + self.position - self.view)",
"def delete_char() -> None:\n if (high_scores.insert_user is not None and\n len(high_scores.insert_user) > 0):\n high_scores.insert_user = high_scores.insert_user[:-1]",
"def _remove_prompt(self, line):\n if line.startswith(self.prompt_first):\n return line[len(self.prompt_first):]\n elif line.startswith(self.prompt_next):\n return line[len(self.prompt_next):]\n else:\n return line",
"def delete_last_dash(self, s):\n if not isinstance(s, str): # In future - do it by decorator\n raise ValueError(f'Error! Parametr s - {s} is not string')\n if s[-1] == '-':\n return s[:-1]\n else:\n return s",
"def backspace():\r\n entry_text_area.delete(\"end-2c\", tkinter.END)",
"def getchar():\n char = ''\n while re.match(r\"^[a-zA-Z]$\", char) is None : # other: if len(char)>1 or not char.isalpha():\n char = input(\"Entrez un caractère et un seul : \")\n return char",
"def stripped_input(prompt):\n return input(prompt).strip()",
"def highest_character(a_text):\n return",
"def current_char(self) -> str:",
"def last_chars(fh):\n\n txt = ''\n \n with fh as fg:\n for line in fg:\n last = line[-2]\n txt = txt + last\n \n return txt",
"def strip_final_spaces(buffer):\n if len(buffer) > 0:\n buffer[-1] = buffer[-1].rstrip()",
"def remove_character(self, parsing_key, string):\n\n return self.parsing_rules[parsing_key].sub(\"\", string)",
"def print_last_word(words):\n word = words.pop(-1)\n print word\n ##note popping",
"def last_keypad_name(self):\n return self._last_keypad_name",
"def getNonBlank( self ):\n\t\twhile self.nextChar.isspace():\n\t\t\tself.getChar()",
"def _strip_backspaces(output):\r\n backspace_char = \"\\x08\"\r\n return output.replace(backspace_char, \"\")",
"def _ch_backspace(self):\n pass",
"def delete_last_word(text, number=1):\n words = re.findall(r\"[\\w]+|[^\\w\\s]\", text, re.UNICODE)\n for one in range(1, number + 1):\n text = text.rstrip()\n if text == '':\n return text\n text = text[:len(text) - len(words[-one])]\n return text",
"def _strip_prompt(self, a_string):\r\n logger.info(\"Host {}: Stripping prompt\".format(self._host))\r\n response_list = a_string.split(\"\\n\")\r\n last_line = response_list[-1]\r\n if self._base_prompt in last_line:\r\n return \"\\n\".join(response_list[:-1])\r\n else:\r\n return a_string"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Finds the regions where the cursor(s) should land. If the command is in select mode, each region is extended (its endpoints are apart); otherwise it collapses onto a single character.
|
def _get_found_regions(view, character, sel, line, direction):
if direction == Constants.RIGHT:
line_portion = Region(sel[0], line.b)
else:
line_portion = Region(line.a, sel[1])
from_sel = view.substr(line_portion)
if direction == Constants.RIGHT:
found_pos = from_sel.find(character)
else:
found_pos = from_sel.rfind(character)
if found_pos > 0:
# otherwise we didn't find anything
if current_state().select:
if direction == Constants.RIGHT:
a = sel[0]
b = sel[0] + found_pos
else:
a = line.a + found_pos
b = sel[1]
else:
if direction == Constants.RIGHT:
a = b = sel[0] + found_pos
else:
a = b = line.a + found_pos
return Region(a, b)
# for clearing only the region that can be advanced, we need to
# push back the current selection
return Region(sel[0], sel[1])
|
[
"def get_word_cursor(view, region):\n\n if region.a == region.b:\n lrgn = sublime.Region(0, 0)\n lrgn.a = 0 if region.a < 20 else (region.a - 20)\n lrgn.b = region.b\n rrgn = sublime.Region(0, 0)\n rrgn.a = region.a\n rrgn.b = view.size() if region.b + 20 > view.size() else (region.b + 20)\n matl = re.search(r'[\\w]*$', view.substr(lrgn))\n matr = re.search(r'[\\w]*', view.substr(rrgn))\n gets = ''\n if matl:\n gets += matl.group(0)\n if matr:\n gets += matr.group(0)\n return gets\n else:\n return view.substr(region)",
"def _insert_characters(self, edit):\r\n\r\n regions = [a for a in self.view.sel()]\r\n self.view.sel().clear()\r\n\r\n for region in reversed(regions):\r\n\r\n if self.view.settings().get('auto_match_enabled', True):\r\n position = region.end()\r\n else:\r\n position = region.begin()\r\n\r\n self.view.sel().add(sublime.Region(position, position))",
"def get_participating_regions(self):\n # For first cut just returning east and west\n return [\"east\", \"west\"]",
"def _areasToEdit(self):\n boundaries = []\n keywords = (\"k_2A\", \"k_2D\")\n\n for keyword in keywords:\n for i in range(0, len(self.__fileContent)):\n if keyword in self.__fileContent[i]:\n if keyword == keywords[0]:\n # Mark the position of the offset + magic number 2\n # so we start editing on the proper line\n boundaries.append(i + 2)\n\n # We have completed or exceeded the area we can edit\n elif keyword == keywords[1]:\n boundaries.append(i - 1)\n break\n return boundaries",
"def displayRanges(self):\n ranges = self.textSearch.getRanges()\n print(\"found \", len(ranges), \" ranges\")\n i = 1\n for txtRange in ranges:\n oSel = txtRange.sel\n oCursor = oSel.getText().createTextCursorByRange(oSel)\n if not oCursor:\n print(\"could not get range \", i)\n continue\n oCursor.CharBackColor = 15138560 # yellow\n oCursor.collapseToEnd()\n oCursor.getText().insertString(oCursor, \"(\" + str(i) + \")\", False)\n i += 1",
"def regionSelect(self, x1, y1, x2, y2 ):\n w = abs(x1-x2)\n h = abs(y1-y2)\n\n\n retVal = None\n if( w <= 0 or h <= 0 or w > self.width or h > self.height ):\n warnings.warn(\"regionSelect: the given values will not fit in the image or are too small.\")\n else:\n xf = x2 \n if( x1 < x2 ):\n xf = x1\n yf = y2\n if( y1 < y2 ):\n yf = y1\n retVal = self.crop(xf, yf, w, h)\n \n \n return retVal",
"def see_potential_moves(self):\n potential_regions = []\n regions_with_enemy = []\n potential_regions_with_enemy = []\n returned_regions = []\n\n if self.region.y < 3:\n l1 = [self.region.x, self.region.y + 1]\n potential_regions.append(l1)\n # Don't go out of bounds right\n if self.region.x < 3:\n l2 = [self.region.x + 1, self.region.y]\n potential_regions.append(l2)\n\n x = self.region.x\n y = self.region.y\n max_y = 4 # Plus one for range function\n max_x = 4 # Plus one for range function\n for i in range(x, max_x):\n for j in range(y, max_y):\n potential_regions_with_enemy.append([i, j])\n\n if potential_regions_with_enemy:\n for coordination in potential_regions_with_enemy:\n the_region = self.map.positions[tuple(coordination)]\n if the_region.contains_good_characters:\n regions_with_enemy.append([the_region.x, the_region.y])\n\n if potential_regions:\n for region in potential_regions:\n returned_regions.append(region)\n if regions_with_enemy:\n for region in regions_with_enemy:\n returned_regions.append(region)\n\n # initialize a null list\n unique_regions = []\n\n # traverse for all elements\n for x in returned_regions:\n # check if exists in unique_list or not\n if x not in unique_regions:\n unique_regions.append(x)\n if unique_regions:\n return unique_regions\n else:\n return None",
"def _get_selection_cursor(self, start, end):\n cursor = self._control.textCursor()\n cursor.setPosition(start)\n cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)\n return cursor",
"def select_lines_and_grids(self):\n select_start_coord = self.graphics_select_tool_item.start_coord\n select_end_coord = self.graphics_select_tool_item.end_coord\n select_middle_x = (select_start_coord[0] + select_end_coord[0]) / 2.0\n select_middle_y = (select_start_coord[1] + select_end_coord[1]) / 2.0\n \n for shape in self.shape_dict.values():\n if isinstance(shape, GraphicsLib.GraphicsItemLine):\n (start_point, end_point) = shape.get_graphics_points()\n if min(start_point.start_coord[0], \n end_point.start_coord[0]) \\\n < select_middle_x < \\\n max(start_point.start_coord[0], \\\n end_point.start_coord[0]) and \\\n min(start_point.start_coord[1], \\\n end_point.start_coord[1]) < \\\n select_middle_y < \\\n max(start_point.start_coord[1],\n end_point.start_coord[1]):\n shape.setSelected(True)",
"def _subtract_selection(pushed_regions, sel_regions):\n for reg in pushed_regions:\n for sel in sel_regions:\n if sel.begin() <= reg.end() and reg.begin() <= sel.end():\n # yield the region from the start of the field to the selection\n if reg.begin() < sel.begin():\n yield sublime.Region(reg.begin(), sel.begin())\n # update the region to be from the end of the selection to\n # the end of the field\n reg = sublime.Region(sel.end(), reg.end())\n # if the region is not forward, break and don't add it as field\n if not reg.a < reg.b:\n break\n else:\n # yield the region as field\n yield reg",
"def select_all_smart(self):\n c = self._get_cursor()\n sel_range = c.selectionStart(), c.selectionEnd()\n\n c.clearSelection()\n c.setPosition(self._get_prompt_cursor().position())\n c.setPosition(self._get_end_pos(),\n mode=QtGui.QTextCursor.KeepAnchor)\n new_sel_range = c.selectionStart(), c.selectionEnd()\n if sel_range == new_sel_range:\n # cell already selected, expand selection to whole document\n self.select_document()\n else:\n # set cell selection as active selection\n self._control.setTextCursor(c)",
"def _restore_selection(view, only_other):\n sel_regions = _get_fields(view)\n if not only_other:\n sel_regions.extend(view.sel())\n _erase_fields(view)\n return sel_regions",
"def get_selections(self, bounds):\n\n selections = []\n for s in self.view.sel():\n b = Dimensions(*self.view.text_to_layout(s.b))\n if b.y <= bounds.y.start:\n # We are pass the viewport\n continue\n a = Dimensions(*self.view.text_to_layout(s.a))\n if a.y >= bounds.y.end:\n # We haven't reached the view port yet\n break\n if (\n (bounds.x.start <= a.x <= bounds.x.end or bounds.x.start <= b.x <= bounds.x.end) or\n not (\n (a.x >= bounds.x.end and b.x >= a.x and a.y == b.y) or\n (b.x <= bounds.x.start and a.x <= b.x and a.y == b.y) or\n (a.x >= bounds.x.end and b.x <= bounds.x.start and a.y + 1 == b.y)\n )\n ):\n selections.append(s)\n return selections",
"def extract_regions(self, text):\n pass",
"def getSelectionRange (self,sort=True): # swingTextWidget.\n\n w = self\n sel = 0,0 ### sel = Tk.Text.tag_ranges(w,\"sel\")\n if len(sel) == 2:\n i,j = sel\n else:\n i = j = 0 ### i = j = Tk.Text.index(w,\"insert\")\n\n i,j = w.toPythonIndex(i),w.toPythonIndex(j) \n if sort and i > j: i,j = j,i\n return i,j",
"def select_range_pipeline(self):\r\n colId = self.choose_collection()\r\n docId = self.choose_document(colId)\r\n\r\n def select_range(colId, docId):\r\n print(\"\\nDefine the START of the page range:\")\r\n pageNr_start = self.choose_page(colId, docId)\r\n print(\"Define the END of the page range:\")\r\n print(\"(Make sure that selected sequence has no gaps in it. There is no handling of input errors!)\")\r\n pageNr_end = self.choose_page(colId, docId)\r\n if pageNr_end < pageNr_start:\r\n print(\"The end must be greater than the start!\")\r\n select_range(colId, docId)\r\n \r\n return pageNr_start, pageNr_end\r\n \r\n pageNr_start, pageNr_end = select_range(colId, docId)\r\n\r\n return {\"start\": Cts().from_string(f\"tr:{colId}.{docId}:{pageNr_start}\"),\r\n \"end\": Cts().from_string(f\"tr:{colId}.{docId}:{pageNr_end}\")}",
"def selection_rowcol_preserved_on_replace(view, replaced_region):\n if not any(r.intersects(replaced_region) for r in view.sel()):\n yield\n return\n\n old_rowcols = []\n for r in view.sel():\n if is_point_inside(r.a, replaced_region, strict=True):\n a = view.rowcol(r.a)\n else:\n a = None\n\n if is_point_inside(r.b, replaced_region, strict=True):\n b = view.rowcol(r.b)\n else:\n b = None\n\n old_rowcols.append((a, b))\n\n old_sel_len = len(view.sel())\n old_size = view.size()\n\n yield\n\n assert len(view.sel()) == old_sel_len, \"Number of cursors changed\"\n\n new_end = replaced_region.end() + view.size() - old_size\n new_selection = []\n \n for reg, (a_rowcol, b_rowcol) in zip(view.sel(), old_rowcols):\n if a_rowcol is None:\n a = reg.a\n else:\n a = min(new_end, view.text_point(*a_rowcol))\n \n if b_rowcol is None:\n b = reg.b\n else:\n b = min(new_end, view.text_point(*b_rowcol))\n\n new_selection.append(sublime.Region(a, b))\n\n set_selection(view, to_all=new_selection)",
"def find_cursor_position(screenshot_one_filename, screenshot_two_filename, output_filename):\n\n image_one = load_image(screenshot_one_filename)\n image_two = load_image(screenshot_two_filename)\n\n maybe_rect = find_cursor_by_difference(image_one, image_two)\n\n if maybe_rect:\n draw_rect(image_one, maybe_rect)\n\n save_image(image_one, output_filename)",
"def region_for_selector(self, selector):\n geo = self.currentFrame().findFirstElement(selector).geometry()\n try:\n region = (geo.left(), geo.top(), geo.right(), geo.bottom())\n except:\n raise Exception(\"can't get region for selector '%s'\" % selector)\n return region"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
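The _get_found_regions document above depends on the Sublime Text API, so a minimal sketch of the same line-slice search on plain strings may help; the helper name and behaviour below are illustrative assumptions, not part of the record.

def find_on_line(line_text, cursor_col, character, direction):
    # Search away from the cursor on a single line, mirroring the
    # find/rfind split in _get_found_regions. Matches at the cursor
    # itself are skipped (the document uses "> 0" for the same reason).
    if direction == "right":
        found = line_text[cursor_col:].find(character)
        return cursor_col + found if found > 0 else cursor_col
    found = line_text[:cursor_col].rfind(character)
    return found if found > 0 else cursor_col

# Example: from column 1 of "hello world", jump right to the first 'o'.
print(find_on_line("hello world", 1, "o", "right"))   # -> 4
print(find_on_line("hello world", 9, "o", "left"))    # -> 7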
Create a threaded WSGI Server
|
def makeServer(host, port, app, handler_class=WSGIRequestHandler):
httpd = ThreadedWSGIServer((host, port), handler_class)
httpd.set_app(app)
return httpd
|
[
"def run() -> None: # pragma: no cover\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(levelname)s - %(message)s'\n )\n args = parse_user_args()\n config = wsgi_config.WSGIConfig()\n config.configure_gwsgi(args)\n httpd = make_wsgi_server(\n config.host, config.port, config.application,\n config.threading, config.processing, config.wsgiref\n )\n logging.debug(f'WSGIServer: Serving HTTP on port {config.port} ...\\n')\n httpd.serve_forever()",
"def run(self, host='127.0.0.1', port=5000):\n httpd = wsgiref.simple_server.make_server('', port, self)\n log(\"PWF now running on http://%s:%s/\" % (host, port,))\n httpd.serve_forever()",
"def run(self):\n parts = urlparse(HOST_BASE)\n domain, port = parts.netloc.split(\":\")\n self.srv = make_server(domain, int(port), self.app)\n try:\n self.srv.serve_forever()\n except:\n import traceback\n traceback.print_exc()\n # Failed to start\n self.srv = None",
"def serve(self, port=8000):\n \n # Make a HTTP-server from the WSGI-handler\n server = make_server('', port, self.wsgi)\n \n # Run the server until terminated\n server.serve_forever()",
"async def start(self):\n self._app = web.Application(\n loop=self._loop, middlewares=self._middlewares\n )\n for resource in self._nyuki.HTTP_RESOURCES:\n resource.RESOURCE_CLASS.register(self._nyuki, self._app.router)\n log.info(\"Starting the http server on {}:{}\".format(self._host, self._port))\n self._handler = self._app.make_handler(access_log=access_log)\n self._server = await self._loop.create_server(\n self._handler, host=self._host, port=self._port\n )",
"def start(self):\n self.httpd = socketserver.ThreadingTCPServer(\n (\"\", self.port), self.handler, False\n )\n self.httpd.request_queue_size = 500\n self.httpd.timeout = 2000\n self.httpd.server_bind()\n self.httpd.server_activate()\n\n if self.cert_filename != \"\" and os.path.isfile(self.cert_filename) and \\\n self.key_filename != \"\" and os.path.isfile(self.key_filename):\n self.httpd.socket = ssl.wrap_socket(\n self.httpd.socket, certfile=self.cert_filename, server_side=True,\n keyfile=self.key_filename\n )\n print(\"start serving\")\n _thread.start_new_thread(self.httpd.serve_forever, ())",
"def start(self):\n server_host = Constants.RPI_IP\n server_port = Constants.WEB_SERVER_PORT # random.randint(10000, 60000)\n new_loop = asyncio.new_event_loop()\n start_server = websockets.serve(self.__send_data, server_host, server_port, loop=new_loop)\n t = threading.Thread(target=self.__start_loop, args=(new_loop, start_server))\n t.start()\n print(\"Server launched\")\n time.sleep(2)",
"def runserver():\n from web.server import runserver\n runserver()",
"def main() -> None:\n\n start_server()",
"def WSGIServer(server_address, wsgi_app):\r\n import wsgiserver\r\n\r\n # Default values of wsgiserver.ssl_adapters uses cheerypy.wsgiserver\r\n # prefix. Overwriting it make it work with web.wsgiserver.\r\n wsgiserver.ssl_adapters = {\r\n 'builtin': 'web.wsgiserver.ssl_builtin.BuiltinSSLAdapter',\r\n 'pyopenssl': 'web.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',\r\n }\r\n\r\n return wsgiserver.CherryPyWSGIServer(server_address, wsgi_app, server_name=\"localhost\")",
"def serve(ip, port, application, ssl=None, processes=1, **kwargs):\r\n\r\n try:\r\n # use werkzeug if its there\r\n from werkzeug.serving import run_simple\r\n print(\"Using Werkzeug run_simple\")\r\n run_simple(ip, port, application, ssl_context=ssl, processes=processes, **kwargs)\r\n return\r\n except ImportError:\r\n pass\r\n\r\n # otherwise just use python's built in wsgi webserver\r\n from wsgiref.simple_server import make_server\r\n server = make_server(ip, port, application)\r\n print(\"Serving on %s:%s, using built in Python server\" % (ip, port))\r\n try:\r\n server.serve_forever()\r\n except KeyboardInterrupt:\r\n pass",
"def SpawnServer(self, RequestHandler):\n while True:\n try:\n port = remote_access.GetUnusedPort()\n address = ('', port)\n self.httpd = SymbolServer(address, RequestHandler)\n break\n except socket.error as e:\n if e.errno == errno.EADDRINUSE:\n continue\n raise\n self.server = 'http://localhost:%i' % port\n self.httpd_pid = os.fork()\n if self.httpd_pid == 0:\n self.httpd.serve_forever(poll_interval=0.1)\n sys.exit(0)",
"def main():\n application = webapp.WSGIApplication(ROUTES, debug=True)\n run_wsgi_app(application)",
"def runsimple(func, server_address=(\"0.0.0.0\", 8080)):\r\n func = StaticMiddleware(func)\r\n func = LogMiddleware(func)\r\n \r\n server = WSGIServer(server_address, func)\r\n\r\n print \"http://%s:%d/\" % server_address\r\n try:\r\n server.start()\r\n except KeyboardInterrupt:\r\n server.stop()",
"def start():\n port = cfg.web.port\n\n events.dispatcher.register_target(event_logger)\n\n logging.info('Starting web server: port=%d' % port)\n utils.DaemonThread(target=bottle.run,\n kwargs={'host': cfg.web.bind,\n 'port': cfg.web.port}).start()",
"def run_wsgi_app(app, port=8080):\n print \"starting eventlet server on port %i\" % port\n wsgi.server(\n eventlet.listen(('', port)),\n app,\n max_size=MAX_GREEN_THREADS,\n )",
"def start_listening(self):\n try:\n simple_server = wsgiref.simple_server.make_server(\"127.0.0.1\", self.port, self.__process_request)\n logger.info(\"Started skyhook on port: %s\" % self.port)\n while self.__keep_running:\n simple_server.timeout = 0.1\n simple_server.handle_request()\n logger.info(\"Quitting\")\n except:\n logger.error(traceback.format_exc())\n logger.error(\"Can't start the server\")\n logger.info(\"Stopped listening\")",
"def start_gunicorn_server():\n from gunicorn.app.base import Application\n\n class WSGIServer(Application):\n def __init__(self, app, options=None):\n self.options = options or {}\n self.application = app\n super(WSGIServer, self).__init__()\n\n def load_config(self):\n config = dict(\n [(key, value) for key, value in iteritems(self.options)\n if key in self.cfg.settings and value is not None])\n for key, value in iteritems(config):\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return taskflow()\n\n options = {\n 'bind': '%s:%s' % ('0.0.0.0', os.environ['PORT']),\n 'workers': 2,\n 'loglevel': 'DEBUG',\n 'worker_class': 'eventlet'\n }\n\n WSGIServer(taskflow(), options).run()",
"def runserver():\n load_app().run()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
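The ThreadedWSGIServer used by makeServer above is not shown in this record; one common way to build such a class (an assumption here, not necessarily the original) is to mix socketserver.ThreadingMixIn into wsgiref's WSGIServer:

import socketserver
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, demo_app

class ThreadedWSGIServer(socketserver.ThreadingMixIn, WSGIServer):
    # Handle each request in its own thread; do not block interpreter
    # exit on threads that are still serving.
    daemon_threads = True

def makeServer(host, port, app, handler_class=WSGIRequestHandler):
    httpd = ThreadedWSGIServer((host, port), handler_class)
    httpd.set_app(app)
    return httpd

if __name__ == "__main__":
    # demo_app is wsgiref's built-in sample WSGI application.
    makeServer("127.0.0.1", 8000, demo_app).serve_forever()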
This function tests several cases of before_space(s) and after_space(s).
|
def testA():
assert before_space("ab cde")=="ab"
assert before_space("ab cde fg")=="ab"
assert before_space(" ab cde")==""
assert after_space("ab cde")=="cde"
assert after_space("ab cde fg")=="cde fg"
assert after_space(" ab cde")=="ab cde"
|
[
"def test_before_space():\n print('Testing before_space')\n result = currency.before_space(' ')\n introcs.assert_equals('', result)\n result = currency.before_space('Nicholas ')\n introcs.assert_equals('Nicholas', result)\n result = currency.before_space(' Nicholas')\n introcs.assert_equals('', result)\n result = currency.before_space('N i c h o l a s')\n introcs.assert_equals('N', result)",
"def test_after_space():\n print('Testing after_space')\n result = currency.after_space(' ')\n introcs.assert_equals(' ', result)\n result = currency.after_space('Nicholas ')\n introcs.assert_equals('', result)\n result = currency.after_space('Nich olas')\n introcs.assert_equals('olas', result)\n result = currency.after_space(' Nicholas')\n introcs.assert_equals('Nicholas', result)\n result = currency.after_space('N i c h o l a s')\n introcs.assert_equals('i c h o l a s', result)",
"def test_whitespace_features():\n # -- WhitespaceCount ------------------------------------------------------\n assert whitespace_count('1 2 3 ') == 3\n assert whitespace_count(1234) == 0\n assert whitespace_count(' ') == 3\n assert whitespace_count('') == 0\n # -- WhitespaceFraction ---------------------------------------------------\n assert whitespace_fraction('1 2 3 ') == 0.5\n assert whitespace_fraction(1234) == 0\n assert whitespace_fraction(' ') == 1\n assert whitespace_fraction('') == 0",
"def test_double_whitespace_idv(self):\n feed = \"this contains many spaces that should be replaced !\"\n expected = \"this contains many spaces that should be replaced !\"\n\n result = Parser().parse_double_whitespace(feed)\n self.assertEqual(expected, result)",
"def test_spaces_outside_section(self):\n self.assertContains('enwiki_help_editing', 'Naming and_moving')\n self.assertContains('enwiki_help_editing', ' Naming and_moving ')\n self.assertContains('enwiki_help_editing', ' Naming and_moving_')",
"def test_remove_spaces():\n test_string = \"a b c\"\n expected_string = \"abc\"\n\n subbed_string = search_spaces.sub('', test_string)\n assert subbed_string == expected_string",
"def test_calc_can_parse_pre_whitespace_correctly():\n assert False # Don't just make this True.",
"def test_calc_can_parse_whitespace_correctly():\n assert False # Don't just make this True.",
"def before_space(s):\r\n result = 0\r\n result = introcs.count_str(s, \" \")\r\n assert result > 0\r\n\r\n x = introcs.find_str(s, \" \")\r\n\r\n return s[0:x]",
"def test_calc_can_parse_all_forms_of_whitespace():\n assert False # Don't just make this True.",
"def test_calc_can_parse_post_whitespace_correctly():\n assert False # Don't just make this True.",
"def test_remove_with_whitespace():\n assert remove(\"Don't worry my friends.\", string.whitespace) == \"Don'tworrymyfriends.\"",
"def test_despace():\n print('Testing despace')\n # One space\n result = accum2.despace('a b')\n introcs.assert_equals('ab',result)\n\n # Adjacent spaces\n result = accum2.despace('a b')\n introcs.assert_equals('ab',result)\n\n # Non-adjacent spaces\n result = accum2.despace(' a b ')\n introcs.assert_equals('ab',result)\n\n # No spaces\n result = accum2.despace('ab')\n introcs.assert_equals('ab',result)\n\n # All spaces\n result = accum2.despace(' ')\n introcs.assert_equals('',result)",
"def _is_whitespace(self, char):\n if char == \" \":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False",
"def test_output_of_letters_with_commented_whitespace(code, output):\n from esolang_whitespace import whitespace\n assert whitespace(code) == output",
"def defaultIsHSpace(start, stop, line):\n\tif start==0 or stop>=len(line)-1:\n\t\treturn False\n\tif stop - start > 1:\n\t\treturn True\n\treturn line[start]==\"\\t\"",
"def contains_only_spaces(state_machine):\n for state in state_machine.states.values():\n target_state_list = state.transitions().get_target_state_index_list()\n # (1) if a pattern contains only ' ', then there is no place for more than\n # one target state, since every state has only one trigger and one target state\n if len(target_state_list) > 1: return False\n\n # (2) does state exclusively trigger on ' '?\n # (2a) does state trigger on ' '?\n all_trigger_set = state.transitions().get_trigger_set_union()\n if all_trigger_set.contains(ord(' ')) == False: return False\n # (2b) does state trigger on nothing else? \n if all_trigger_set.difference(NumberSet(ord(' '))).is_empty() == False: return False\n\n return True",
"def is_valid_space(board, space):\r\n return space in range(1, 10) and board[space] == ' '",
"def _SpaceRequiredBetween(left, right, is_line_disabled):\n lval = left.value\n rval = right.value\n if (left.is_pseudo and _IsIdNumberStringToken(right) and\n left.previous_token and _IsIdNumberStringToken(left.previous_token)):\n # Space between keyword... tokens and pseudo parens.\n return True\n if left.is_pseudo or right.is_pseudo:\n # There should be a space after the ':' in a dictionary.\n if left.OpensScope():\n return True\n # The closing pseudo-paren shouldn't affect spacing.\n return False\n if left.is_continuation or right.is_continuation:\n # The continuation node's value has all of the spaces it needs.\n return False\n if right.name in pytree_utils.NONSEMANTIC_TOKENS:\n # No space before a non-semantic token.\n return False\n if _IsIdNumberStringToken(left) and _IsIdNumberStringToken(right):\n # Spaces between keyword, string, number, and identifier tokens.\n return True\n if lval == ',' and rval == ':':\n # We do want a space between a comma and colon.\n return True\n if style.Get('SPACE_INSIDE_BRACKETS'):\n # Supersede the \"no space before a colon or comma\" check.\n if left.OpensScope() and rval == ':':\n return True\n if right.ClosesScope() and lval == ':':\n return True\n if (style.Get('SPACES_AROUND_SUBSCRIPT_COLON') and\n (_IsSubscriptColonAndValuePair(left, right) or\n _IsSubscriptColonAndValuePair(right, left))):\n # Supersede the \"never want a space before a colon or comma\" check.\n return True\n if rval in ':,':\n # Otherwise, we never want a space before a colon or comma.\n return False\n if lval == ',' and rval in ']})':\n # Add a space between ending ',' and closing bracket if requested.\n return style.Get('SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET')\n if lval == ',':\n # We want a space after a comma.\n return True\n if lval == 'from' and rval == '.':\n # Space before the '.' in an import statement.\n return True\n if lval == '.' and rval == 'import':\n # Space after the '.' in an import statement.\n return True\n if (lval == '=' and rval in {'.', ',,,'} and\n subtypes.DEFAULT_OR_NAMED_ASSIGN not in left.subtypes):\n # Space between equal and '.' as in \"X = ...\".\n return True\n if lval == ':' and rval in {'.', '...'}:\n # Space between : and ...\n return True\n if ((right.is_keyword or right.is_name) and\n (left.is_keyword or left.is_name)):\n # Don't merge two keywords/identifiers.\n return True\n if (subtypes.SUBSCRIPT_COLON in left.subtypes or\n subtypes.SUBSCRIPT_COLON in right.subtypes):\n # A subscript shouldn't have spaces separating its colons.\n return False\n if (subtypes.TYPED_NAME in left.subtypes or\n subtypes.TYPED_NAME in right.subtypes):\n # A typed argument should have a space after the colon.\n return True\n if left.is_string:\n if (rval == '=' and\n subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in right.subtypes):\n # If there is a type hint, then we don't want to add a space between the\n # equal sign and the hint.\n return False\n if rval not in '[)]}.' and not right.is_binary_op:\n # A string followed by something other than a subscript, closing bracket,\n # dot, or a binary op should have a space after it.\n return True\n if right.ClosesScope():\n # A string followed by closing brackets should have a space after it\n # depending on SPACE_INSIDE_BRACKETS. 
A string followed by opening\n # brackets, however, should not.\n return style.Get('SPACE_INSIDE_BRACKETS')\n if subtypes.SUBSCRIPT_BRACKET in right.subtypes:\n # It's legal to do this in Python: 'hello'[a]\n return False\n if left.is_binary_op and lval != '**' and _IsUnaryOperator(right):\n # Space between the binary operator and the unary operator.\n return True\n if left.is_keyword and _IsUnaryOperator(right):\n # Handle things like \"not -3 < x\".\n return True\n if _IsUnaryOperator(left) and _IsUnaryOperator(right):\n # No space between two unary operators.\n return False\n if left.is_binary_op or right.is_binary_op:\n if lval == '**' or rval == '**':\n # Space around the \"power\" operator.\n return style.Get('SPACES_AROUND_POWER_OPERATOR')\n # Enforce spaces around binary operators except the blocked ones.\n block_list = style.Get('NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS')\n if lval in block_list or rval in block_list:\n return False\n if style.Get('ARITHMETIC_PRECEDENCE_INDICATION'):\n if _PriorityIndicatingNoSpace(left) or _PriorityIndicatingNoSpace(right):\n return False\n else:\n return True\n else:\n return True\n if (_IsUnaryOperator(left) and lval != 'not' and\n (right.is_name or right.is_number or rval == '(')):\n # The previous token was a unary op. No space is desired between it and\n # the current token.\n return False\n if (subtypes.DEFAULT_OR_NAMED_ASSIGN in left.subtypes and\n subtypes.TYPED_NAME not in right.subtypes):\n # A named argument or default parameter shouldn't have spaces around it.\n return style.Get('SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN')\n if (subtypes.DEFAULT_OR_NAMED_ASSIGN in right.subtypes and\n subtypes.TYPED_NAME not in left.subtypes):\n # A named argument or default parameter shouldn't have spaces around it.\n return style.Get('SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN')\n if (subtypes.VARARGS_LIST in left.subtypes or\n subtypes.VARARGS_LIST in right.subtypes):\n return False\n if (subtypes.VARARGS_STAR in left.subtypes or\n subtypes.KWARGS_STAR_STAR in left.subtypes):\n # Don't add a space after a vararg's star or a keyword's star-star.\n return False\n if lval == '@' and subtypes.DECORATOR in left.subtypes:\n # Decorators shouldn't be separated from the 'at' sign.\n return False\n if left.is_keyword and rval == '.':\n # Add space between keywords and dots.\n return lval not in {'None', 'print'}\n if lval == '.' and right.is_keyword:\n # Add space between keywords and dots.\n return rval not in {'None', 'print'}\n if lval == '.' 
or rval == '.':\n # Don't place spaces between dots.\n return False\n if ((lval == '(' and rval == ')') or (lval == '[' and rval == ']') or\n (lval == '{' and rval == '}')):\n # Empty objects shouldn't be separated by spaces.\n return False\n if not is_line_disabled and (left.OpensScope() or right.ClosesScope()):\n if (style.GetOrDefault('SPACES_AROUND_DICT_DELIMITERS', False) and (\n (lval == '{' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or\n (rval == '}' and\n _IsDictListTupleDelimiterTok(right, is_opening=False)))):\n return True\n if (style.GetOrDefault('SPACES_AROUND_LIST_DELIMITERS', False) and (\n (lval == '[' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or\n (rval == ']' and\n _IsDictListTupleDelimiterTok(right, is_opening=False)))):\n return True\n if (style.GetOrDefault('SPACES_AROUND_TUPLE_DELIMITERS', False) and (\n (lval == '(' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or\n (rval == ')' and\n _IsDictListTupleDelimiterTok(right, is_opening=False)))):\n return True\n if left.OpensScope() and right.OpensScope():\n # Nested objects' opening brackets shouldn't be separated, unless enabled\n # by SPACE_INSIDE_BRACKETS.\n return style.Get('SPACE_INSIDE_BRACKETS')\n if left.ClosesScope() and right.ClosesScope():\n # Nested objects' closing brackets shouldn't be separated, unless enabled\n # by SPACE_INSIDE_BRACKETS.\n return style.Get('SPACE_INSIDE_BRACKETS')\n if left.ClosesScope() and rval in '([':\n # A call, set, dictionary, or subscript that has a call or subscript after\n # it shouldn't have a space between them.\n return False\n if left.OpensScope() and _IsIdNumberStringToken(right):\n # Don't separate the opening bracket from the first item, unless enabled\n # by SPACE_INSIDE_BRACKETS.\n return style.Get('SPACE_INSIDE_BRACKETS')\n if left.is_name and rval in '([':\n # Don't separate a call or array access from the name.\n return False\n if right.ClosesScope():\n # Don't separate the closing bracket from the last item, unless enabled\n # by SPACE_INSIDE_BRACKETS.\n # FIXME(morbo): This might be too permissive.\n return style.Get('SPACE_INSIDE_BRACKETS')\n if lval == 'print' and rval == '(':\n # Special support for the 'print' function.\n return False\n if left.OpensScope() and _IsUnaryOperator(right):\n # Don't separate a unary operator from the opening bracket, unless enabled\n # by SPACE_INSIDE_BRACKETS.\n return style.Get('SPACE_INSIDE_BRACKETS')\n if (left.OpensScope() and (subtypes.VARARGS_STAR in right.subtypes or\n subtypes.KWARGS_STAR_STAR in right.subtypes)):\n # Don't separate a '*' or '**' from the opening bracket, unless enabled\n # by SPACE_INSIDE_BRACKETS.\n return style.Get('SPACE_INSIDE_BRACKETS')\n if rval == ';':\n # Avoid spaces before a semicolon. (Why is there a semicolon?!)\n return False\n if lval == '(' and rval == 'await':\n # Special support for the 'await' keyword. Don't separate the 'await'\n # keyword from an opening paren, unless enabled by SPACE_INSIDE_BRACKETS.\n return style.Get('SPACE_INSIDE_BRACKETS')\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
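The before_space and after_space implementations are not part of this record; hypothetical versions consistent with every assert in testA above simply split on the first space (all of the test inputs contain at least one space):

def before_space(s):
    # Substring before the first space; '' when the string starts with a space.
    return s[:s.find(' ')]

def after_space(s):
    # Substring after the first space, keeping any later spaces intact.
    return s[s.find(' ') + 1:]

assert before_space(" ab cde") == ""
assert after_space("ab cde fg") == "cde fg"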
This function tests several cases of first_inside_quotes(s), get_from(json), get_to(json) and has_error(json).
|
def testB():
assert first_inside_quotes('a"bc"de')=="bc"
assert first_inside_quotes('a"bc"de"f"g')=="bc"
assert first_inside_quotes('"a"bc"de"f"g')=="a"
assert first_inside_quotes('abcd""')==""
json1= '{"from":"2 United States Dollars","to":"1.825936 Euros","success":true,"error":""}'
assert get_from(json1)=='2 United States Dollars'
assert get_to(json1)=='1.825936 Euros'
assert has_error(json1)==False
json2='{"from":"","to":"","success":false,"error":"Source currency code is invalid."}'
assert get_from(json2)==''
assert get_to(json2)==''
assert has_error(json2)==True
|
[
"def test_from_json_string(self):\n var1 = None\n ret1 = Base.to_json_string(var1)\n self.assertEqual(Base.from_json_string(ret1), [])\n\n var2 = []\n ret2 = Base.to_json_string(var2)\n self.assertEqual(Base.from_json_string(ret2), [])\n\n var3 = [{}]\n ret3 = Base.to_json_string(var3)\n self.assertEqual(Base.from_json_string(ret3), [{}])\n\n var4 = [{'holberton': 'school'}]\n ret4 = Base.to_json_string(var4)\n self.assertEqual(Base.from_json_string(ret4), var4)\n\n var5 = \"Hello world\"\n ret5 = Base.to_json_string(var5)\n self.assertEqual(Base.from_json_string(ret5), var5)",
"def test_is_json_true(self):\r\n result = json_processor.is_json(self.example_json_string)\r\n self.assertTrue(result)",
"def test_wrong_json_data(self):\n test_dict = {\n \"something\": 1,\n \"data\": \"cakes\",\n \"happy\": \"noodles\",\n \"action\": \"Nothing here\"\n }\n\n info = json.dumps(test_dict)\n try:\n self.test_json_loading(info)\n except WrongDataFormatException:\n pass\n\n try:\n info = self.test_json_loading(data='{\"stuff\": \"yolo!\"}')\n except KeyError:\n pass",
"def test_first_inside_quotes():\n print('Testing first_inside_quotes')\n result = currency.first_inside_quotes('A \"B C\" D')\n introcs.assert_equals('B C', result)\n result = currency.first_inside_quotes('A \"B C\" D \"E F\" G')\n introcs.assert_equals('B C', result)\n result = currency.first_inside_quotes('\"\"')\n introcs.assert_equals('', result)\n result = currency.first_inside_quotes('a\"\"b\"\"c')\n introcs.assert_equals('', result)\n result = currency.first_inside_quotes('''\"abc\"''')\n introcs.assert_equals('abc', result)",
"def test_loads_trailing(self):\n assert orjson.loads(\"{}\\n\\t \") == {}",
"def test_build_from_good_json(self):",
"def test_loads_trailing_invalid(self):\n pytest.raises(orjson.JSONDecodeError, orjson.loads, \"{}\\n\\t a\")",
"def test_json():\n assert hug.types.json({\"this\": \"works\"}) == {\"this\": \"works\"}\n assert hug.types.json(json.dumps({\"this\": \"works\"})) == {\"this\": \"works\"}\n with pytest.raises(ValueError):\n hug.types.json(\"Invalid JSON\")\n\n assert hug.types.json(json.dumps([\"a\", \"b\"]).split(\",\")) == [\"a\", \"b\"]\n with pytest.raises(ValueError):\n assert hug.types.json([\"Invalid JSON\", \"Invalid JSON\"])",
"def has_error(json):\r\n assert type(json) is str\r\n\r\n x = introcs.rfind_str(json, \"error\")\r\n iserror = first_inside_quotes(json[x+6:])\r\n # one character works seems to be a bug with .isalpha or is numeric\r\n # as it wont return True if i check the entire str\r\n iserror = introcs.isalpha(iserror[0:1])\r\n\r\n y = first_inside_quotes(json[2:])\r\n y = introcs.strip(y, ': ,')\r\n y = introcs.capitalize(y)\r\n\r\n y = y == 'False' # if false return True\r\n\r\n # if both true return true as error exists if not return false\r\n return iserror and y or False",
"def _is_suspected_json(string):\n if string.startswith('{') or string.startswith('\\'{') or string.startswith('\\\"{'):\n return True\n if string.startswith('[') or string.startswith('\\'[') or string.startswith('\\\"['):\n return True\n if re.match(r\"^['\\\"\\s]*{.+}|\\[.+\\]['\\\"\\s]*$\", string):\n return True\n\n return False",
"def test_json_load():\n for entry in json_result:\n json.loads(entry)",
"def test_json_converter(self):\r\n result = json_processor.convert_json_to_dict(self.example_json_string)\r\n self.assertEqual(self.expected_output, result)",
"def valid_json(input):\n is_valid = False\n try:\n simplejson.loads(input)\n is_valid = True\n except:\n pass\n\n return is_valid",
"def is_json(input_file):\n\n with open(input_file) as unknown_file:\n c = unknown_file.read(1)\n if c == '{':\n return True\n return False",
"def test_valueerror(self):\n pytest.raises(orjson.JSONDecodeError, orjson.loads, \"{\")\n pytest.raises(ValueError, orjson.loads, \"{\")",
"def test_json(ref_json: str, obj_json: str) -> None:\n ref_obj = Test().from_json(ref_json)\n obj = Test().from_json(obj_json)\n\n assert obj == ref_obj\n assert json.loads(obj.to_json(0)) == json.loads(ref_json)",
"def parse_json_first_key_pair(json_string):\n pass",
"def test_bad_json(self):\n response = self.client.post(\n self.url,\n data=\"{'this': 123}\",\n content_type=CONTENT_TYPE_JSON,\n **{'HTTP_TOKEN': str(self.endpoint_def.token)},\n )\n\n assert response.status_code == HTTPStatus.NOT_ACCEPTABLE\n assert (\n json.loads(response.content)['detail'] == 'Expecting property name enclosed in double quotes'\n )",
"def test_json_format(self):\n reformatted = json.dumps(\n self.json,\n indent=4,\n ensure_ascii=False,\n sort_keys=True,\n )\n reformatted += \"\\n\"\n if self.json_str != reformatted:\n self.json_path.write_text(reformatted)\n self.fail(\"JSON file is not formatted correctly, Fixing...\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
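Hypothetical implementations that satisfy the asserts in testB above (the originals are not included in this record; a json.loads-based version is used here purely for brevity):

import json

def first_inside_quotes(s):
    # Text between the first pair of double quotes in s.
    start = s.index('"') + 1
    return s[start:s.index('"', start)]

def get_from(payload):
    return json.loads(payload)["from"]

def get_to(payload):
    return json.loads(payload)["to"]

def has_error(payload):
    # True when the service reported a non-empty error message.
    return json.loads(payload)["error"] != ""

assert first_inside_quotes('a"bc"de') == "bc"
assert has_error('{"from":"","to":"","success":false,"error":"bad code"}') == True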
This function tests several cases of currency_response(currency_from,currency_to,amount_from).
|
def testC():
assert currency_response('USD','EUR',2.5)=='{ "from" : "2.5 United States Dollars", "to" : "2.0952375 Euros", "success" : true, "error" : "" }'
assert currency_response('AAA','EUR',2.5)=='{ "from" : "", "to" : "", "success" : false, "error" : "Source currency code is invalid." }'
assert currency_response('USD','AAA',2.5)=='{ "from" : "", "to" : "", "success" : false, "error" : "Exchange currency code is invalid." }'
|
[
"def test_service_response():\n print('Testing service_response')\n result = currency.service_response('USD', 'EUR', 2.5)\n introcs.assert_equals(\n '{\"success\": true, \"src\": \"2.5 United States Dollars\", \"dst\": \"2.2160175 Euros\", \"error\": \"\"}', result)\n result = currency.service_response('USD', 'EUR', -2.5)\n introcs.assert_equals(\n '{\"success\": true, \"src\": \"-2.5 United States Dollars\", \"dst\": \"-2.2160175 Euros\", \"error\": \"\"}', result)\n result = currency.service_response('p', 'EUR', 2.5)\n introcs.assert_equals(\n '{\"success\": false, \"src\": \"\", \"dst\": \"\", \"error\": \"The rate for currency P is not present.\"}', result)\n result = currency.service_response('USD', 'p', 2)\n introcs.assert_equals(\n '{\"success\": false, \"src\": \"\", \"dst\": \"\", \"error\": \"The rate for currency P is not present.\"}', result)",
"def test_convert_amount(self):\r\n\r\n init = 'USD'\r\n new_currency = 'USD'\r\n amount = 1\r\n curr = CurrencyRates()\r\n curr_conversion = curr.convert(init, new_currency, amount)\r\n self.assertNotEqual(curr_conversion, 2)\r\n self.assertEqual(curr_conversion, 1)",
"def test_currency_endpoint_available(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def testD():\n #Test case for valid iscurrency\n result24 = a1.iscurrency('USD')\n cornell.assert_equals = (True, result24)\n \n #test case for invalid iscurrency\n result25 = a1.iscurrency('AAA')\n cornell.assert_equals = (False, result25)\n \n #Test case for invalid iscurrency\n result26 = a1.iscurrency('usd')\n cornell.assert_equals = (False, result26)\n\n #Test case for valid exchange\n result27 = a1.exchange('USD','HKD',1.0)\n cornell.assert_floats_equal(7.82541, result27)",
"def test_currency_list(self):\n response = self.client.get(self.url)\n self.assertGreater(len(response.data[0]), 2)",
"def test_exchange():\n print('Testing exchange')\n result = currency.exchange('USD', 'EUR', 2.5)\n introcs.assert_floats_equal(2.2160175, result)\n result = currency.exchange('USD', 'EUR', -2.5)\n introcs.assert_floats_equal(-2.2160175, result)",
"def test_exchange_endpoint(self):\n\n response = self.get(reverse('api-currency-exchange'), expected_code=200)\n\n self.assertIn('base_currency', response.data)\n self.assertIn('exchange_rates', response.data)",
"def testD():\n assert iscurrency('USD')==True\n assert iscurrency('KES')==True\n assert iscurrency('kes')==False\n assert iscurrency('aaa')==False\n assert exchange('USD','EUR',2.5)-2.0952375<=0.000001\n assert exchange('USD','KES',5.4)-557.4752586<=0.000001\n assert exchange('LKR','NGN',1.7)-3.9997994984588<=0.000001",
"def test_convert_amount():\n money = convert_amount(\"1.000,00€\")\n assert money.amount == Decimal(\"1000.00\")\n assert money.currency == EUR",
"def test_iscurrency():\n print('Testing iscurrency')\n result = currency.iscurrency('USD')\n introcs.assert_true(result)\n result = currency.iscurrency('p')\n introcs.assert_false(result)",
"def convert(amount, from_currency, to_currency):\n\tfrom_currency = from_currency.upper()\n\tto_currency = to_currency.upper()\n\tlink = \"https://open.er-api.com/v6/latest/USD\"\n\tfile_name = \"./.files/USD.json\"\n\t\n\ttry:\n\t\tjson_data = requests.get(link).json()\n\t\trates_data = json_data[\"rates\"]\n\texcept Exception:\n\t\twith open(file_name, \"r\") as jsf:\n\t\t\tdata = json.load(jsf)\n\t\t\trates_data = data[\"rates\"]\n\telse:\n\t\tif(is_data_outdataed(json_data)):\n\t\t\twrite_data_to_file(json_data)\n\t\t\twrite_last_update(json_data)\n\n\tif(from_currency == \"USD\"):\n\t\tres = amount * rates_data[to_currency]\n\telif (to_currency == \"USD\"):\n\t\tres = amount / rates_data[from_currency]\n\telse:\n\t\tres_1 = convert(amount, from_currency, \"USD\")\n\t\tres = convert(res_1, \"USD\", to_currency)\n\treturn round(res, 4)",
"def test_post_same_currency(self):\n bob_acc = Account(owner='Bob', currency='USD', balance=1000)\n bob_acc.save()\n alice_acc = Account(owner='Alice', currency='USD', balance=800)\n alice_acc.save()\n\n response = self.client.post(reverse('payment-list'),\n data={'from_account': bob_acc.id,\n 'to_account': alice_acc.id,\n 'amount': 500}).data\n\n self.assertTrue(isinstance(response, dict))\n # check all fields of created model are present\n self.assertEqual(response['id'], 1)\n self.assertEqual(response['from_account'], bob_acc.id)\n self.assertEqual(response['to_account'], alice_acc.id)\n self.assertEqual(response['amount'], 500)\n # check that Payment model was created\n payments = Payment.objects.all()\n self.assertEqual(len(payments), 1)\n self.assertEqual(payments[0].id, 1)\n self.assertEqual(payments[0].from_account.id, bob_acc.id)\n self.assertEqual(payments[0].to_account.id, alice_acc.id)\n self.assertEqual(payments[0].amount, 500)\n # finally check that accounts were charged appropriately\n bob_acc.refresh_from_db()\n self.assertEqual(bob_acc.balance, 500)\n alice_acc.refresh_from_db()\n self.assertEqual(alice_acc.balance, 1300)",
"def test_rate_exceed_result(self):\n process_result = process_response(self.resp_rate_exceed)\n self.assertEqual(process_result[\"result\"], 2)",
"def test_currency_symbol(self):\r\n\r\n init = 'USD'\r\n c2 = CurrencyCodes()\r\n c_symbol = c2.get_symbol(init)\r\n self.assertEqual(c_symbol, 'US$')",
"def test_get_account_by_type_and_currency(self):\n pass",
"def test_convert_amount_unknown_currency():\n with pytest.raises(CurrencyException) as exc:\n convert_amount(\"1.000,00$\")\n assert \"Unknown currency: $\" in str(exc)",
"def test_price_cross_exchange(self):\n pc = PriceEngine(logging=False)\n # Create an price on the TEST_EX\n pc.process_update_message(PriceUpdate(\"TEST_EX\", \"BTC\", \"USD\", 1001.0, 0.0009))\n rate_request = RateRequest(\"TEST_EX\", \"BTC\", \"TEST_EX\", \"USD\")\n pc.handle_rate_request(rate_request)\n self.assertEqual(rate_request.rate, 1001)\n # Create a better price on another TEST_EX_2\n # Check we pick up the improved rate\n pc.process_update_message(PriceUpdate(\"TEST_EX_2\", \"BTC\", \"USD\", 1100.0, 0.0008))\n rate_request = RateRequest(\"TEST_EX\", \"BTC\", \"TEST_EX\", \"USD\")\n pc.handle_rate_request(rate_request)\n self.assertEqual(rate_request.rate, 1100)",
"def get_test_account_balance_response():\n\treturn {\n\t\t\"ResultType\":0,\n\t\t\"ResultCode\":0,\n\t\t\"ResultDesc\":\"The service request has been accepted successfully.\",\n\t\t\"OriginatorConversationID\":\"10816-694520-2\",\n\t\t\"ConversationID\":\"AG_20200927_00007cdb1f9fb6494315\",\n\t\t\"TransactionID\":\"LGR0000000\",\n\t\t\"ResultParameters\":{\n\t\t\"ResultParameter\":[\n\t\t\t{\n\t\t\t\"Key\":\"ReceiptNo\",\n\t\t\t\"Value\":\"LGR919G2AV\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"Conversation ID\",\n\t\t\t\"Value\":\"AG_20170727_00004492b1b6d0078fbe\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"FinalisedTime\",\n\t\t\t\"Value\":20170727101415\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"Amount\",\n\t\t\t\"Value\":10\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"TransactionStatus\",\n\t\t\t\"Value\":\"Completed\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"ReasonType\",\n\t\t\t\"Value\":\"Salary Payment via API\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"TransactionReason\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"DebitPartyCharges\",\n\t\t\t\"Value\":\"Fee For B2C Payment|KES|33.00\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"DebitAccountType\",\n\t\t\t\"Value\":\"Utility Account\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"InitiatedTime\",\n\t\t\t\"Value\":20170727101415\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"Originator Conversation ID\",\n\t\t\t\"Value\":\"19455-773836-1\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"CreditPartyName\",\n\t\t\t\"Value\":\"254708374149 - John Doe\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"DebitPartyName\",\n\t\t\t\"Value\":\"600134 - Safaricom157\"\n\t\t\t}\n\t\t]\n\t},\n\t\"ReferenceData\":{\n\t\"ReferenceItem\":{\n\t\t\"Key\":\"Occasion\",\n\t\t\"Value\":\"aaaa\"\n\t}\n\t}\n\t\t}",
"def test_get_whitelist_by_currency(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
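The exact envelope asserted in testC above can be produced with a simple template; the helper below is a hypothetical sketch of the formatting step only (the real currency_response also has to query the exchange service):

def format_response(src, dst, success, error):
    # Reproduces the spacing used in the expected strings of testC.
    return ('{ "from" : "%s", "to" : "%s", "success" : %s, "error" : "%s" }'
            % (src, dst, 'true' if success else 'false', error))

assert format_response('', '', False, 'Source currency code is invalid.') == \
    '{ "from" : "", "to" : "", "success" : false, "error" : "Source currency code is invalid." }'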
This function tests several cases of iscurrency(currency) and exchange(currency_from,currency_to,amount_from).
|
def testD():
assert iscurrency('USD')==True
assert iscurrency('KES')==True
assert iscurrency('kes')==False
assert iscurrency('aaa')==False
    assert abs(exchange('USD','EUR',2.5)-2.0952375)<=0.000001
    assert abs(exchange('USD','KES',5.4)-557.4752586)<=0.000001
    assert abs(exchange('LKR','NGN',1.7)-3.9997994984588)<=0.000001
|
[
"def test_exchange():\n print('Testing exchange')\n result = currency.exchange('USD', 'EUR', 2.5)\n introcs.assert_floats_equal(2.2160175, result)\n result = currency.exchange('USD', 'EUR', -2.5)\n introcs.assert_floats_equal(-2.2160175, result)",
"def _smart_exchange(self, currency_amount):\n\n # first, compute amount we have to convert to and amount we have for conversion\n \n\n to_conv = {}\n from_conv = copy.deepcopy(self.cash)\n for curr in currency_amount:\n if curr not in self.cash:\n from_conv[curr] = Cash(0.00, curr)\n\n to = currency_amount[curr] - from_conv[curr].amount\n\n if to > 0:\n to_conv[curr] = Cash(to, curr)\n del from_conv[curr] # no extra cash available for conversion\n else:\n # no conversion will be necessary\n from_conv[curr].amount -= currency_amount[curr]\n\n # perform currency exchange\n exchange_history = []\n for to_cash in to_conv.values():\n one_exchange = False\n # Try converting one shot if possible\n for from_cash in from_conv.values():\n if from_cash.amount_in(to_cash.currency) >= to_cash.amount:\n # perform conversion\n self.exchange_currency(to_currency=to_cash.currency,\n from_currency=from_cash.currency,\n to_amount=to_cash.amount)\n\n # update amount we have to convert to or amount we have for conversion\n amt = to_cash.amount_in(from_cash.currency)\n\n rate = from_cash.exchange_rate(to_cash.currency)\n exchange_history.append(\n (amt, from_cash.currency, to_cash.amount,\n to_cash.currency, rate))\n\n from_cash.amount -= amt\n to_cash.amount = 0.00\n\n # move to next 'to_cash'\n one_exchange = True\n break\n\n # If we reached here,\n # it means we couldn't perform one currency exchange to meet our 'to_cash'\n # So we'll just convert whatever we can\n if not one_exchange:\n for from_cash in from_conv.values():\n if from_cash.amount_in(to_cash.currency) >= to_cash.amount:\n # perform conversion\n self.exchange_currency(\n to_currency=to_cash.currency,\n from_currency=from_cash.currency,\n to_amount=to_cash.amount)\n\n amt = to_cash.amount_in(from_cash.currency)\n rate = from_cash.exchange_rate(to_cash.currency)\n exchange_history.append(\n (amt, from_cash.currency, to_cash.amount,\n to_cash.currency, rate))\n\n # update amount we have to convert to and amount we have for conversion\n from_cash.amount -= amt\n to_cash.amount = 0.00\n else:\n self.exchange_currency(\n to_currency=to_cash.currency,\n from_currency=from_cash.currency,\n from_amount=from_cash.amount)\n amt = from_cash.amount_in(to_cash.currency)\n\n rate = from_cash.exchange_rate(to_cash.currency)\n exchange_history.append(\n (from_cash.amount, from_cash.currency, amt,\n to_cash.currency, rate))\n\n # update amount we have to convert to and amount we have for conversion\n to_cash.amount -= amt\n from_cash.amount = 0.00\n\n return exchange_history",
"def testD():\n #Test case for valid iscurrency\n result24 = a1.iscurrency('USD')\n cornell.assert_equals = (True, result24)\n \n #test case for invalid iscurrency\n result25 = a1.iscurrency('AAA')\n cornell.assert_equals = (False, result25)\n \n #Test case for invalid iscurrency\n result26 = a1.iscurrency('usd')\n cornell.assert_equals = (False, result26)\n\n #Test case for valid exchange\n result27 = a1.exchange('USD','HKD',1.0)\n cornell.assert_floats_equal(7.82541, result27)",
"def test_iscurrency():\n print('Testing iscurrency')\n result = currency.iscurrency('USD')\n introcs.assert_true(result)\n result = currency.iscurrency('p')\n introcs.assert_false(result)",
"def test_convert_amount(self):\r\n\r\n init = 'USD'\r\n new_currency = 'USD'\r\n amount = 1\r\n curr = CurrencyRates()\r\n curr_conversion = curr.convert(init, new_currency, amount)\r\n self.assertNotEqual(curr_conversion, 2)\r\n self.assertEqual(curr_conversion, 1)",
"def testC():\n assert currency_response('USD','EUR',2.5)=='{ \"from\" : \"2.5 United States Dollars\", \"to\" : \"2.0952375 Euros\", \"success\" : true, \"error\" : \"\" }'\n assert currency_response('AAA','EUR',2.5)=='{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"Source currency code is invalid.\" }'\n assert currency_response('USD','AAA',2.5)=='{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"Exchange currency code is invalid.\" }'",
"def currency_exchange(currency, amount):\n if currency == 'EUR':\n amount *= 0.88\n elif currency == 'USD':\n amount *= 0.80\n elif currency == 'GBP':\n amount *= 1\n return round(amount, 2)",
"def test_exchange_endpoint(self):\n\n response = self.get(reverse('api-currency-exchange'), expected_code=200)\n\n self.assertIn('base_currency', response.data)\n self.assertIn('exchange_rates', response.data)",
"def test_price_cross_exchange(self):\n pc = PriceEngine(logging=False)\n # Create an price on the TEST_EX\n pc.process_update_message(PriceUpdate(\"TEST_EX\", \"BTC\", \"USD\", 1001.0, 0.0009))\n rate_request = RateRequest(\"TEST_EX\", \"BTC\", \"TEST_EX\", \"USD\")\n pc.handle_rate_request(rate_request)\n self.assertEqual(rate_request.rate, 1001)\n # Create a better price on another TEST_EX_2\n # Check we pick up the improved rate\n pc.process_update_message(PriceUpdate(\"TEST_EX_2\", \"BTC\", \"USD\", 1100.0, 0.0008))\n rate_request = RateRequest(\"TEST_EX\", \"BTC\", \"TEST_EX\", \"USD\")\n pc.handle_rate_request(rate_request)\n self.assertEqual(rate_request.rate, 1100)",
"def exchange_currency(self,\n to_currency,\n from_currency,\n to_amount=None,\n from_amount=None):\n\n from_currency = from_currency.upper()\n to_currency = to_currency.upper()\n \n # add cash instances of both currencies to portfolio if non-existent\n self.add_cash(0.0, from_currency)\n self.add_cash(0.0, to_currency)\n \n if to_amount is None and from_amount is None:\n raise Exception(\n \"Argument `to_amount` or `from_amount` must be specified.\")\n \n if to_amount is not None and from_amount is not None:\n raise Exception(\n \"Please specify only `to_amount` or `from_amount`, not both.\")\n \n if to_amount is not None:\n from_amount = self.cash[to_currency].exchange_rate(\n from_currency) * to_amount\n elif from_amount is not None:\n to_amount = self.cash[from_currency].exchange_rate(\n to_currency) * from_amount\n\n self.add_cash(to_amount, to_currency)\n self.add_cash(-from_amount, from_currency)",
"def test_convert_amount():\n money = convert_amount(\"1.000,00€\")\n assert money.amount == Decimal(\"1000.00\")\n assert money.currency == EUR",
"def exchange(src, dst, amt):\r\n assert iscurrency(src)\r\n assert iscurrency(dst)\r\n assert isinstance(float(amt), float) or isinstance(int(amt), int)\r\n assert not isinstance(amt, bool)\r\n\r\n # query the server\r\n x = introcs.urlread(\r\n 'https://ecpyfac.ecornell.com/python/currency/fixed?src=' +\r\n src + '&dst=' + dst + '&amt=' + str(amt) + '&key=' + APIKEY)\r\n\r\n y = get_dst(x)\r\n z = before_space(y)\r\n\r\n return float(z)",
"def test_convert_amount_unknown_currency():\n with pytest.raises(CurrencyException) as exc:\n convert_amount(\"1.000,00$\")\n assert \"Unknown currency: $\" in str(exc)",
"def test_currency_endpoint_available(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_currency(self):\n self.assertEqual(AccountPeriodMixinTest.ACCOUNT_CURRENCY, self._instance.currency)",
"def matches_currencies(self, cur1, cur2):\n return self.base_currency == cur1 and self.quote_currency == cur2",
"def test_same_currency_for_sell_and_buy_is_not_allowed(self):\n trade = self.factory.make_trade(\n buy_currency=currencies.USD, sell_currency=currencies.USD, save=False\n )\n with self.assertRaisesRegexp(\n ValidationError, r\"Currencies must be different\\.\"\n ):\n trade.full_clean()",
"def valid_currency(x: str) -> bool:\n return x in CURRENCIES",
"def test_get_whitelist_by_currency(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Register all the callables in a single module with gin. A useful way to add gin configurability to a codebase without explicitly using the .configurable decorator.
|
def register_module_with_gin(module, module_name=None):
module_name = module.__name__ if module_name is None else module_name
for attr in dir(module):
if callable(getattr(module, attr)):
setattr(module, attr, gin.configurable(getattr(module, attr), module=module_name))
|
[
"def call_register(root_dir):\n for mod in imported_modules:\n if hasattr(mod, \"register\"):\n mod.register()",
"def register_run_config(name):\n basecls = BaseConfig\n registry = RUN_CONFIG_REGISTRY\n reg_func = partial(_register, name=name, basecls=basecls, registry=registry)\n return reg_func",
"def register_module(self, mod, prefix='', what=None, **routing_info):\n if what is None:\n what = mod.__all__\n for func_identifier in what:\n func = getattr(mod, func_identifier)\n func_name = prefix + func.__name__\n self.register_task(func, func_name, **routing_info)",
"def register(self, config, func, key=None):\n config.register(self, key, func)",
"def register(registry:list):\n def decorate(func):\n registry.append(func)\n return func\n return decorate",
"def register(self, app, options, first_registration=False):\n if first_registration:\n self.submodules = list(app.find_submodules(self.import_name))\n\n super(PgAdminModule, self).register(app, options, first_registration)\n\n for module in self.submodules:\n app.register_blueprint(module)",
"def configurable(self, configurable):\n self.configurables.append(configurable)",
"def test_custom_global_generator_multiple():\n c = TestClient()\n for num in range(3):\n generator = textwrap.dedent(f\"\"\"\n class MyGenerator{num}:\n def __init__(self, conanfile):\n self._conanfile = conanfile\n def generate(self):\n self._conanfile.output.info(f\"MyGenerator{num}!!\")\n \"\"\")\n save(os.path.join(c.cache.custom_generators_path, f\"mygen{num}.py\"), generator)\n conanfile = textwrap.dedent(\"\"\"\n [requires]\n pkg/0.1\n\n [generators]\n MyGenerator0\n MyGenerator1\n MyGenerator2\n \"\"\")\n c.save({\"pkg/conanfile.py\": GenConanfile(\"pkg\", \"0.1\"),\n \"conanfile.txt\": conanfile})\n c.run(\"create pkg\")\n c.run(\"install .\")\n assert \"conanfile.txt: Generator 'MyGenerator0' calling 'generate()'\" in c.out\n assert \"conanfile.txt: Generator 'MyGenerator1' calling 'generate()'\" in c.out\n assert \"conanfile.txt: Generator 'MyGenerator2' calling 'generate()'\" in c.out\n assert \"conanfile.txt: MyGenerator0!!\" in c.out\n assert \"conanfile.txt: MyGenerator1!!\" in c.out\n assert \"conanfile.txt: MyGenerator2!!\" in c.out",
"def register():\n\n register_parallel_backend('flink', FlinkBackend)",
"def register_all(keras_objects: bool = True, custom_kernels: bool = True) -> None:\n if keras_objects:\n register_keras_objects()\n if custom_kernels:\n register_custom_kernels()",
"def register_by_module(module):\n\n # Get a list of all user specified modules attached to module\n module_names = module.__all__\n\n # Add in package preamble\n module_names = [module.__name__ + '.' + mod for mod in module_names]\n\n # Register all of the sub-modules\n register(module_names)\n\n return",
"def test_custom_global_generator_imports():\n c = TestClient()\n generator = textwrap.dedent(\"\"\"\n from _myfunc import mygenerate\n\n class MyCustomGenerator:\n def __init__(self, conanfile):\n self._conanfile = conanfile\n def generate(self):\n mygenerate(self._conanfile)\n \"\"\")\n myaux = textwrap.dedent(\"\"\"\n def mygenerate(conanfile):\n conanfile.output.info(\"MYGENERATE WORKS!!\")\n \"\"\")\n save(os.path.join(c.cache.custom_generators_path, \"mygen.py\"), generator)\n save(os.path.join(c.cache.custom_generators_path, \"_myfunc.py\"), myaux)\n\n c.save({\"conanfile.txt\": \"\"})\n c.run(\"install . -g MyCustomGenerator\")\n assert \"conanfile.txt: Generator 'MyCustomGenerator' calling 'generate()'\" in c.out\n assert \"conanfile.txt: MYGENERATE WORKS!!\" in c.out",
"def register(func):\n print('running register(%s)' % func)\n registry.append(func)\n return func",
"def _register_config(config_package, config_mod):\n prefix = _get_mod_name(config_package)\n for x in [i for i in dir(config_mod) if _is_valid_config_member(config_mod, i, prefix=prefix)]:\n v = getattr(config_mod, x)\n _merge_config_value(config_package, x, v, fromp=config_mod.__name__)",
"def register_core_plugins():\n\n for cls in _builtin_sys_plugins:\n obj = cls()\n obj.activate()",
"def configure(app):\n pass",
"def register(ctx, pkgs=None):\n pkgs = pkgs or []\n if len(pkgs) == 0:\n pkgs = _WORKFLOW_PACKAGES.get()\n\n ctx.obj[CTX_PACKAGES] = pkgs",
"def register(app, **kwargs):\n providers_func = ProviderApi.as_view('providers')\n for uri in ['/providers', '/providers/', '/providers/<name>',\n '/providers/<name>/']:\n app.add_url_rule(uri, view_func=providers_func,\n methods=['GET'])",
"def register_binning(name: Optional[str] = None):\n\n def decorator(f: Callable) -> Callable:\n key = name or f.__name__[:-8]\n binning_methods[key] = f\n return f\n\n return decorator"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
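For illustration, a minimal end-to-end sketch of the record above: it repeats register_module_with_gin so the sketch is self-contained, applies it to a hypothetical module (toy_math and its scale function are made up for this example, not part of the record), and then overrides a default argument through gin.parse_config.

import types

import gin


def register_module_with_gin(module, module_name=None):
    # Same function as in the record above, repeated so the sketch is self-contained.
    module_name = module.__name__ if module_name is None else module_name
    for attr in dir(module):
        if callable(getattr(module, attr)):
            setattr(module, attr,
                    gin.configurable(getattr(module, attr), module=module_name))


# Hypothetical module with a single callable to register.
toy_math = types.ModuleType('toy_math')


def scale(x, factor=1.0):
    return x * factor


toy_math.scale = scale
register_module_with_gin(toy_math)

# The wrapped callable can now be configured without touching its source.
gin.parse_config('toy_math.scale.factor = 3.0')
print(toy_math.scale(2))  # -> 6.0
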
Use for parsing collections that may contain a 'gin' key. The 'gin' key is assumed to map to either a dict or str value that contains gin bindings. e.g.
|
def gin_dict_parser(coll):
if 'gin' in coll:
if is_mapping(coll['gin']):
gin.parse_config("".join(map(lambda t: f'{t[0]} = {t[1]}\n', iteritems(coll['gin']))))
elif isinstance(coll['gin'], str):
gin.parse_config(coll['gin'])
return coll
|
[
"def nested_gin_dict_parser(coll):\n return nested_dict_walker(gin_dict_parser, coll)",
"def _parse_input_(self, input_item):\r\n for key, value in input_item.items():\r\n if isinstance(value, dict):\r\n value = PreserveKeysDottedDict(**{str(k): v for k, v in value.items()})\r\n if isinstance(value, list):\r\n _list = []\r\n for item in value:\r\n if isinstance(item, dict):\r\n _list.append(PreserveKeysDottedDict(item))\r\n else:\r\n _list.append(item)\r\n value = _list\r\n self.__setitem__(key, value)",
"def _parse_input_(self, input_item):\r\n for key, value in input_item.items():\r\n if isinstance(value, dict):\r\n value = DottedDict(**{str(k): v for k, v in value.items()})\r\n if isinstance(value, list):\r\n _list = []\r\n for item in value:\r\n if isinstance(item, dict):\r\n _list.append(DottedDict(item))\r\n else:\r\n _list.append(item)\r\n value = _list\r\n self.__setitem__(key, value)",
"def parse_collection(entry, collection_type, element_type):\n\n return collection_type([element_type(x) for x in entry[1:-1].split(',')])",
"def parse(self, hgvs_string):\n pass",
"def _parseColl ( rc_Collection, name, url ):\n cf = Config(rc_Collection)\n Shorts = cf.keys()\n del Shorts[Shorts.index('__TITLE__')]\n Variables = {}\n for short in Shorts:\n long, units = cf(short).split(';')\n units = units.replace(' ','').replace('1','none')\n Variables[short] = dict(long=long, units=units)\n\n Collection = dict ( name = name,\n title = cf('__TITLE__'),\n vars = Variables,\n rc = rc_Collection,\n url = url )\n return Collection",
"def _ParseKey(self, mediator, registry_key, value_name):",
"def test_PluggableTransport_parseArgumentsIntoDict_valid_list(self):\n pt = bridges.PluggableTransport()\n args = pt._parseArgumentsIntoDict([\"sharedsecret=foobar\",\n \"publickey=1234\"])\n self.assertIsInstance(args, dict)\n self.assertItemsEqual(args, {\"sharedsecret\": \"foobar\",\n \"publickey\": \"1234\"})",
"def _process_annotations(annotations_string):\n return json.loads(annotations_string)",
"def test_spotinst_tags(self):\n spotinst_tags = [\n {\n \"tagKey\": \"foo\",\n \"tagValue\": \"bar\"\n }\n ]\n\n actual_dict = spotinst_tags_to_dict(spotinst_tags)\n\n self.assertEqual(\n {\"foo\": \"bar\"},\n actual_dict\n )",
"def _parse_trans_line_gids(trans_line_gids):\n if isinstance(trans_line_gids, str):\n trans_line_gids = json.loads(trans_line_gids)\n\n return trans_line_gids",
"def test_parser_dict(fresh_aiida_env, incar_dict_example):\n\n parser = IncarParser(data=get_data_node('dict', dict=incar_dict_example))\n assert isinstance(parser.incar, get_data_class('dict'))",
"def __getitem__(self, item):\n if isinstance(item, str):\n return self.annotations[item]\n else:\n raise IndexError('Trying to use Statistics.__getitem__ with non-string key.')",
"def parseInputPars(input_dict,in_list):\n input_pars = {}\n for par in in_list:\n if par in input_dict: \n input_pars[par] = input_dict[par]\n else: \n input_pars[par] = DEFAULT_PARS[par]\n \n return input_pars",
"def get_tagged_users_handles_dict():\n tagged_users_dict = {}\n with open('tagged_users_handle.txt', 'r') as f:\n lines = f.readlines()\n current_tag = \"\"\n current_user_list = []\n for line in lines:\n colon_index = line.find(\":\") \n if colon_index >= 0:\n if len(current_user_list) > 0:\n tagged_users_dict[current_tag] = current_user_list\n current_tag = line[:colon_index].lower()\n current_user_list = []\n elif len(line) > 0 and line != \"\\n\" and line != \"\":\n current_user_list.append(line[1:])\n if len(current_user_list) > 0:\n tagged_users_dict[current_tag] = current_user_list\n return tagged_users_dict",
"def test_PluggableTransport_parseArgumentsIntoDict_valid_list_multi(self):\n pt = bridges.PluggableTransport()\n args = pt._parseArgumentsIntoDict([\"sharedsecret=foobar,password=baz\",\n \"publickey=1234\"])\n self.assertIsInstance(args, dict)\n self.assertItemsEqual(args, {\"sharedsecret\": \"foobar\",\n \"password\": \"baz\",\n \"publickey\": \"1234\"})",
"def parse_value_for_mongo(value):\n newval = 'DefaultValue'\n if isinstance(value, np.ndarray):\n newval = value.tolist()\n elif isinstance(value, dict):\n newval = {}\n for k, v in value.items():\n key = parse_value_for_mongo(k)\n if isinstance(key, str) and '.' in key:\n key = key.replace('.', '-')\n newval[key] = parse_value_for_mongo(v)\n elif hasattr(value, '__iter__') and not isinstance(value, str):\n newval = []\n for element in value:\n newval.append(parse_value_for_mongo(element))\n elif isinstance(value, numbers.Number):\n if isinstance(value, numbers.Rational):\n newval = int(value)\n elif isinstance(value, numbers.Real):\n newval = float(value)\n elif isinstance(value, numbers.Complex):\n newval = complex(value)\n newval = int(value)\n else:\n newval = str(value)\n return newval",
"def _parse_content_all(data : dict, value : Any) -> Any:\n if isinstance(value, str):\n return ConduitStep._parse_context_string(data, value)\n elif isinstance(value, list):\n return [ConduitStep._parse_content_all(data, x) for x in value]\n elif isinstance(value, dict):\n return { ConduitStep._parse_context_string(data, x) : ConduitStep._parse_content_all(data, y) for x, y in value.items() }\n return value",
"def _preprocess_metadata(raw_metadata):\n metadata_content = json.loads(\"\".join(raw_metadata))\n preprocessed_metadata = {}\n\n for key, value in metadata_content.items():\n preprocessed_metadata[key] = AgeRange.from_string(value)\n\n return preprocessed_metadata"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
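A runnable sketch of how the record above is typically used; make_optimizer is a made-up configurable, and plain isinstance checks stand in for the is_mapping/iteritems helpers the original imports but which are not shown in the record.

import gin


@gin.configurable
def make_optimizer(learning_rate=0.1):
    # Made-up configurable whose default is overridden via the 'gin' key below.
    return {'learning_rate': learning_rate}


def gin_dict_parser(coll):
    # Same behaviour as the record above, with isinstance checks in place of
    # the is_mapping/iteritems compatibility helpers it relies on.
    if 'gin' in coll:
        bindings = coll['gin']
        if isinstance(bindings, dict):
            # Binding values must already be valid gin literals, as in the original.
            gin.parse_config(''.join(f'{k} = {v}\n' for k, v in bindings.items()))
        elif isinstance(bindings, str):
            gin.parse_config(bindings)
    return coll


# A collection (e.g. one document loaded from YAML) carrying gin bindings.
config = {'epochs': 3, 'gin': {'make_optimizer.learning_rate': 0.01}}
gin_dict_parser(config)
print(make_optimizer())  # -> {'learning_rate': 0.01}
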
Use for parsing nested collections that may contain a 'gin' key. The 'gin' key is assumed to map to a dict value that contains gin bindings (see gin_dict_parser). Enables support for gin keys in yaml files.
|
def nested_gin_dict_parser(coll):
return nested_dict_walker(gin_dict_parser, coll)
|
[
"def gin_dict_parser(coll):\n if 'gin' in coll:\n if is_mapping(coll['gin']):\n gin.parse_config(\"\".join(map(lambda t: f'{t[0]} = {t[1]}\\n', iteritems(coll['gin']))))\n elif isinstance(coll['gin'], str):\n gin.parse_config(coll['gin'])\n return coll",
"def _parse_input_(self, input_item):\r\n for key, value in input_item.items():\r\n if isinstance(value, dict):\r\n value = DottedDict(**{str(k): v for k, v in value.items()})\r\n if isinstance(value, list):\r\n _list = []\r\n for item in value:\r\n if isinstance(item, dict):\r\n _list.append(DottedDict(item))\r\n else:\r\n _list.append(item)\r\n value = _list\r\n self.__setitem__(key, value)",
"def _parse_input_(self, input_item):\r\n for key, value in input_item.items():\r\n if isinstance(value, dict):\r\n value = PreserveKeysDottedDict(**{str(k): v for k, v in value.items()})\r\n if isinstance(value, list):\r\n _list = []\r\n for item in value:\r\n if isinstance(item, dict):\r\n _list.append(PreserveKeysDottedDict(item))\r\n else:\r\n _list.append(item)\r\n value = _list\r\n self.__setitem__(key, value)",
"def test_extra_sections_when_lines_dict_with_nested_dicts(self):\n class MySchema(Schema):\n foo = ListOption(\n item=DictOption(item=DictOption()))\n\n config = StringIO(\"\"\"\n[__main__]\nfoo = dict1\n dict2\n[dict1]\nbar = dict3\n[dict2]\nbaz = dict4\n[dict3]\nwham = 1\n[dict4]\nwhaz = 2\n\"\"\")\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config)\n parser.parse_all()\n\n self.assertEqual(parser.values(),\n {'__main__': {'foo': [\n {'bar': {'wham': '1'}},\n {'baz': {'whaz': '2'}}]}})\n self.assertTrue(parser.is_valid())",
"def test_extra_sections_when_dict_with_nested_dicts(self):\n class MySchema(Schema):\n foo = DictOption(item=DictOption())\n\n config = StringIO(\"\"\"\n[__main__]\nfoo=dict1\n[dict1]\nbar=dict2\n[dict2]\nbaz=42\n\"\"\")\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config)\n parser.parse_all()\n\n self.assertEqual(parser.values(),\n {'__main__': {'foo': {'bar': {'baz': '42'}}}})\n self.assertTrue(parser.is_valid())",
"def test_config_normalization():\n assert Container._normalize({\n 'grp': {\n '__default__': {\n 'dep1': 'x',\n 'dep2': 'z',\n '$arg': 100,\n 'other': ['a', 'b'],\n '$dic': {'a': 1}\n },\n 'ent': {\n 'dep1:dep1': 'y',\n 'other:other': ['c'],\n '$dic': {'b': 2}\n },\n 'ent2': {\n '$dic': {'c': 3}\n }\n }\n }) == {\n 'grp': {\n 'ent': {\n 'dep1': ('dep1', 'y'),\n 'dep2': ('dep2', 'z'),\n '$arg': 100,\n 'other': (None, (('other', 'c'),)),\n '$dic': {'b': 2},\n },\n 'ent2': {\n 'dep1': ('dep1', 'x'),\n 'dep2': ('dep2', 'z'),\n '$arg': 100,\n 'other': (None, (('other', 'a'), ('other', 'b'))),\n '$dic': {'c': 3}\n }\n }\n }",
"def _parse_top_level_dict(self, document, internal_type, key):\r\n if key not in document:\r\n return\r\n\r\n for k, v in document[key].iteritems():\r\n e = self.template.get_by_logical_id_typed(k, internal_type)\r\n for ek, ev in v.iteritems():\r\n e.add_child(self._handle_value(ek, ev))",
"def is_nested(input):\n return is_sequence(input) or isinstance(input, dict)",
"def YamlParser(config):\n\n assert(os.path.isfile(config))\n\n with open(config) as config_file:\n cfg = yaml.load(config_file, Loader=yaml.FullLoader)\n cfg = edict(cfg)\n\n return cfg",
"def readDefaultMGiniFile(mgfile):\n mgfile = mgfile\n parsedFile = []\n\n try:\n linestring = open(mgfile)\n except Exception as e:\n msg = \"Failed to open file %s\" % (mgfile)\n logger.log.error(msg)\n raise Exception(msg, e)\n\n section = dict()\n keys = []\n for line in linestring:\n if '[' in line:\n if section.get('section') is None:\n section = dict()\n section['section'] = line\n else:\n section['keys'] = keys\n parsedFile.append(section)\n section = dict()\n keys = []\n section['section'] = line\n else:\n keys.append(line)\n\n linestring.close()\n return parsedFile",
"def load_yaml(self,infpath,attrpath):\n obj=yaml_manager.readfile(self.render(infpath))\n self.set_nested(attrpath,obj)",
"def lldb_parse_container(container):\n return lldb_build_digraph(lldb_container_root(container))",
"def parse_collection(entry, collection_type, element_type):\n\n return collection_type([element_type(x) for x in entry[1:-1].split(',')])",
"def test_parser_dict(fresh_aiida_env, incar_dict_example):\n\n parser = IncarParser(data=get_data_node('dict', dict=incar_dict_example))\n assert isinstance(parser.incar, get_data_class('dict'))",
"def test_get_interpolation_keys_dict(self):\n class MySchema(Schema):\n foo = DictOption(spec={'a': IntOption()})\n config = StringIO(textwrap.dedent(\"\"\"\n [__noschema__]\n bar=4\n [__main__]\n foo=mydict\n [mydict]\n a=%(bar)s\n \"\"\"))\n expected = ('mydict', set([]))\n\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config)\n result = parser._get_interpolation_keys('__main__', 'foo')\n self.assertEqual(result, expected)",
"def populate ( json_meta_group ):\n\n # Set name of logger with calling details.\n ls = \"%s by %s\" % ( __name__ , '__populate__' )\n logger = logging.getLogger( ls )\n\n # Extract meta group name info ie ENV.\n for k in json_meta_group.keys():\n\n meta_group = k\n\n # Retrieve meta_group list from config module.\n string_list = \"cf.%s\" % ( meta_group )\n list = eval( string_list ) \n \n # Add every members as children in JSON.\n index = 0\n for index in range( len( list ) ):\n\n json_meta_group[meta_group]['children'].append( list[index] )",
"def interpret_config(self, config: Union[Dict[str, Any], \"ConfigParser\"]):\n # Sort sections by depth, so that we can iterate breadth-first. This\n # allows us to check that we're not expanding an undefined block.\n get_depth = lambda item: len(item[0].split(\".\"))\n for section, values in sorted(config.items(), key=get_depth):\n if section == \"DEFAULT\":\n # Skip [DEFAULT] section for now since it causes validation\n # errors and we don't want to use it\n continue\n parts = section.split(\".\")\n node = self\n for part in parts[:-1]:\n if part == \"*\":\n node = node.setdefault(part, {})\n elif part not in node:\n err_title = f\"Error parsing config section. Perhaps a section name is wrong?\"\n err = [{\"loc\": parts, \"msg\": f\"Section '{part}' is not defined\"}]\n raise ConfigValidationError(self, err, message=err_title)\n else:\n node = node[part]\n node = node.setdefault(parts[-1], {})\n if not isinstance(node, dict):\n # Happens if both value *and* subsection were defined for a key\n err = [{\"loc\": parts, \"msg\": \"found conflicting values\"}]\n raise ConfigValidationError(f\"{self}\\n{({part: dict(values)})}\", err)\n for key, value in values.items():\n try:\n node[key] = srsly.json_loads(config.get(section, key))\n except Exception as e:\n raise ValueError(\n f\"Error reading key '{key}' in section '{section}': {e}\"\n )",
"def read_ini(ini):\n if isinstance(ini,dict): return ini\n if isinstance(ini,str):\n if os.path.exists(ini): ini = open(ini).read()\n config = RawConfigParser()\n config.optionxform=str\n config.readfp(StringIO(u'[root]\\n'+ini))\n return dict(config.items('root'))\n else:\n raise ValueError('Unexpected type for ini file %s'%type(ini))",
"def parse_args():\n import argparse\n\n import gin\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--param\", nargs=\"*\", help=\"List of Gin parameter bindings\")\n parser.add_argument(\"--config\", nargs=\"*\", help=\"List of paths to the config files\")\n\n args = parser.parse_args()\n\n gin.parse_config_files_and_bindings(args.config, args.param)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
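The record above depends on a nested_dict_walker helper that is not shown; the sketch below supplies a plausible stand-in and a simple visitor so the walking behaviour can be seen in isolation. The visitor only prints where the record's version would call gin.parse_config, and both helpers are assumptions for this sketch.

def nested_dict_walker(fn, coll):
    # Plausible stand-in for the helper the record relies on: walk a nested
    # structure and apply `fn` to every mapping encountered, bottom-up.
    if isinstance(coll, dict):
        return fn({k: nested_dict_walker(fn, v) for k, v in coll.items()})
    if isinstance(coll, (list, tuple)):
        return type(coll)(nested_dict_walker(fn, v) for v in coll)
    return coll


def report_gin_keys(mapping):
    # Toy visitor: just report 'gin' keys; the record's gin_dict_parser would
    # parse them as gin bindings instead.
    if 'gin' in mapping:
        print('found gin bindings:', mapping['gin'])
    return mapping


yaml_like = {
    'train': {'gin': {'make_optimizer.learning_rate': 0.01}},
    'stages': [{'gin': 'evaluate.metric = "accuracy"'}],
}
nested_dict_walker(report_gin_keys, yaml_like)
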
Take an array of algorithm ID ints and return an array of PublicKeyCredentialParameters
|
def _generate_pub_key_cred_params(
supported_algs: List[COSEAlgorithmIdentifier],
) -> List[PublicKeyCredentialParameters]:
return [
PublicKeyCredentialParameters(type="public-key", alg=alg)
for alg in supported_algs
]
|
[
"def encode_anchors(anchors):\n nanchors = len(anchors) // 4\n keys = np.empty(shape=(nanchors), dtype=np.int64)\n\n for i in range(nanchors):\n idx = 4*i\n anchor = anchors[idx:idx+4]\n keys[i] = encode_anchor(anchor)\n\n return keys",
"def get_h_param_combinations(h_params):\n combinations = list(itertools.product(*h_params.values()))\n return [{key: val for key, val in zip(h_params.keys(), vals)} for vals in combinations]",
"def merge_encrypted_number_array(values):\n assert isinstance(values, (list, ndarray))\n pub_key = values[0].public_key\n bn, exp = [], []\n for i in range(len(values)):\n assert values[i].__len__() == 1\n bn.append(values[i].ciphertextBN(0))\n exp.append(values[i].exponent(0))\n ct = ipclCipherText(pub_key.pubkey, bn)\n return IpclPaillierEncryptedNumber(pub_key, ct, exp, len(values))",
"def key_scheduling(self, algorithm_list: bytes) -> List[int]:\n key_length = len(algorithm_list)\n s_boxes = [i for i in range(0, 256)]\n j = 0\n for i in range(0, 256):\n j = (j + s_boxes[i] + algorithm_list[i % key_length]) % 256\n tmp = s_boxes[j]\n s_boxes[j] = s_boxes[i]\n s_boxes[i] = tmp\n return s_boxes",
"def generateKeyPair(cls):",
"def sslbetot128bitideaciphers(self) :\n try :\n return self._sslbetot128bitideaciphers\n except Exception as e:\n raise e",
"def get_coeffs(weights):\n coeff_num = weights.__len__() - 1\n pub_key = weights.public_key\n\n bn = []\n exp = []\n for i in range(coeff_num):\n bn.append(weights.ciphertextBN(i))\n exp.append(weights.exponent(i))\n ct = ipclCipherText(pub_key.pubkey, bn)\n return IpclPaillierEncryptedNumber(pub_key, ct, exp, coeff_num)",
"def random_cipher():\n return np.random.permutation(26)",
"def ssltot128bitideaciphers(self) :\n try :\n return self._ssltot128bitideaciphers\n except Exception as e:\n raise e",
"def generate_keypair(bits):\n p = generate_prime(bits // 2)\n # print(p)\n q = generate_prime(bits // 2)\n # print(q)\n n = p * q\n return PrivateKey(p, q, n), PublicKey(n)",
"def get_public_keys_der_v3(self):\n\n if self._v3_siging_data is None:\n self.parse_v3_signing_block()\n\n public_keys = []\n\n for signer in self._v3_siging_data:\n public_keys.append(signer.public_key)\n\n return public_keys",
"def get_all_certs_keys():",
"def get_keys() -> List[Tuple[str, str]]:\n with authorized_keys() as ak:\n return [\n (hashlib.new(\"md5\", line.encode()).hexdigest(), line)\n for line in ak.read().split(\"\\n\")\n if line.strip()\n ]",
"def gen_DH_keys(p=DH_P, g=DH_G):\n private = randbelow(2**256) % p\n public = pow(g, private, p)\n return public, private",
"def _ensure_algorithm_key_combination(algorithm: int, key: PublicKey) -> None:\n if isinstance(key, rsa.RSAPublicKey):\n if _is_rsa(algorithm):\n return\n raise AlgorithmKeyMismatch('algorithm \"%s\" not valid for RSA key' % algorithm)\n if isinstance(key, dsa.DSAPublicKey):\n if _is_dsa(algorithm):\n return\n raise AlgorithmKeyMismatch('algorithm \"%s\" not valid for DSA key' % algorithm)\n if isinstance(key, ec.EllipticCurvePublicKey):\n if _is_ecdsa(algorithm):\n return\n raise AlgorithmKeyMismatch('algorithm \"%s\" not valid for ECDSA key' % algorithm)\n if isinstance(key, ed25519.Ed25519PublicKey):\n if algorithm == Algorithm.ED25519:\n return\n raise AlgorithmKeyMismatch(\n 'algorithm \"%s\" not valid for ED25519 key' % algorithm\n )\n if isinstance(key, ed448.Ed448PublicKey):\n if algorithm == Algorithm.ED448:\n return\n raise AlgorithmKeyMismatch('algorithm \"%s\" not valid for ED448 key' % algorithm)\n\n raise TypeError(\"unsupported key type\")",
"def rsa_keys(p: int = None, q: int = None, e: int = 3) -> RSA_Keys:\n\n if not p or p <= 1:\n p = matasano.math.random_big_prime(e=e)\n if not q or q <= 1:\n q = matasano.math.random_big_prime(e=e)\n\n n = p * q\n phi_n = (p - 1) * (q - 1)\n d = matasano.math.modinv(e, phi_n)\n\n return RSA_Keys(RSA_Priv(d, n), RSA_Pub(e, n))",
"def get_combinatorial_assay_metadata_list(self, protocol_pk, assay_metadata_pk):\n return self.protocol_to_combinatorial_metadata_dict[protocol_pk][assay_metadata_pk]",
"def _genU3CliffordParameters():\n base = np.arange(0, 4)/2 * np.pi # mutiples of pi/2\n all_combinations = list(it.product(*[base]*3))\n return np.array(all_combinations)",
"def parameter_values(drift_weight, upper_boundary, theta):\n\n parameter_combos = np.array(list(itertools.product(drift_weight, upper_boundary, theta)))\n\n # CHECK SIZE OF ARRAY\n print(\"Your parameter search space is size: {0}.\".format(parameter_combos.shape))\n\n return parameter_combos"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
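The record above depends on types from a WebAuthn library; the sketch below swaps in self-contained stand-ins (a plain int alias and a dataclass) so the transformation itself can be run in isolation. The COSE identifiers -7 (ES256) and -257 (RS256) are standard values.

from dataclasses import dataclass
from typing import List

# Stand-ins for the library types used in the record above.
COSEAlgorithmIdentifier = int


@dataclass
class PublicKeyCredentialParameters:
    type: str
    alg: COSEAlgorithmIdentifier


def _generate_pub_key_cred_params(
    supported_algs: List[COSEAlgorithmIdentifier],
) -> List[PublicKeyCredentialParameters]:
    # One PublicKeyCredentialParameters entry per supported algorithm ID.
    return [
        PublicKeyCredentialParameters(type="public-key", alg=alg)
        for alg in supported_algs
    ]


print(_generate_pub_key_cred_params([-7, -257]))
# [PublicKeyCredentialParameters(type='public-key', alg=-7),
#  PublicKeyCredentialParameters(type='public-key', alg=-257)]
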
Set the default parameters. We call `super()` here because `Base` sets up some basic, globally required parameters; it's a must. We check for `self.name` before setting the destination paths for the service files, as sometimes `self.name` is not provided (for instance, when retrieving the status of all services under the init system). `self.name` is set in `base.py`.
|
def __init__(self, logger=None, **params):
super(SystemD, self).__init__(logger=logger, **params)
if self.name:
self.svc_file_dest = os.path.join(
constants.SYSTEMD_SVC_PATH, self.name + '.service')
self.env_file_dest = os.path.join(
constants.SYSTEMD_ENV_PATH, self.name)
|
[
"def __init__(self):\n self.basename = self.basename or self.__class__.__name__.lower()\n self.set_fields()",
"def __init__ ( self, fname=None ) :\n\n self.fname_cp = self.declareParameter( name='FNAME_CONFIG_PARS', val=fname, val_def='confpars.txt', type='str' )",
"def __init__(self, basepath, baseurl):\n self.basepath = basepath\n self.baseurl = baseurl or self.getBaseUrl()",
"def __init__(self, base):\n logging.debug('Base parameter is %s', base)\n if base.rfind('.') < 0:\n raise ValueError(f\"Expected path to file, received {base}\")\n self.basename = base[0:base.rfind('.')]\n self.reserved = {}",
"def __init__(__self__, *,\n name: pulumi.Input[str],\n parameters: pulumi.Input['RouteConfigurationOverrideActionParametersArgs']):\n pulumi.set(__self__, \"name\", 'RouteConfigurationOverride')\n pulumi.set(__self__, \"parameters\", parameters)",
"def add_default_params(self):\n self._params.add('daemonize')\n self._params.add('nodefaults')\n self._params.add_with_value('name', 'vnf{qemu},debug-threads=on'.format(\n qemu=self._opt.get('qemu_id')))\n self._params.add('no-user-config')\n self._params.add_with_value('monitor', 'none')\n self._params.add_with_value('display', 'none')\n self._params.add_with_value('vga', 'none')\n self._params.add('enable-kvm')\n self._params.add_with_value('pidfile', self._temp.get('pidfile'))\n self._params.add_with_value('cpu', 'host')\n self._params.add_with_value(\n 'machine', 'pc,accel=kvm,usb=off,mem-merge=off')\n self._params.add_with_value(\n 'smp', '{smp},sockets=1,cores={smp},threads=1'.format(\n smp=self._opt.get('smp')))\n self._params.add_with_value(\n 'object', 'memory-backend-file,id=mem,size={mem}M,'\n 'mem-path=/dev/hugepages,share=on'.format(mem=self._opt.get('mem')))\n self._params.add_with_value(\n 'm', '{mem}M'.format(mem=self._opt.get('mem')))\n self._params.add_with_value('numa', 'node,memdev=mem')\n self._params.add_with_value('balloon', 'none')",
"def _ensure_base_name_if_needed(self, base_name):\n if self.name is None:\n self._base_name = base_name",
"def _set_fs_copy_params(self, src=None, dst=None):\n # First, initialize a new fs copy command\n self.fs_copy_cmd = FsCopy(self.daos_cmd, self.log)\n\n # set preserve-props path if it was used in test case\n if self.preserve_props_path:\n self.fs_copy_cmd.set_params(preserve_props=self.preserve_props_path)\n\n if src is not None:\n self.fs_copy_cmd.set_params(src=src)\n\n if dst is not None:\n self.fs_copy_cmd.set_params(dst=dst)",
"def setDefaults(self):\n\t\tself.user = 'hdfs'\n\t\tself.releaseDir = '/usr'\n\t\tself.configLocal = '/etc/sysconfig/hadoop'",
"def set_default_parameter_name(self, name):\n if 'parameter_name' not in self.attr:\n self.attr['parameter_name'] = name",
"def defaultParams(self):\n self.blurs = [[-1, self.fileRes], [-1, self.fileRes],[-1, self.fileRes]] \n self.gradient = [[False,True], [False,True], [False,True]]\n self.similarityMetric = [[\"CC\", \"CC\"],[\"CC\", \"CC\"],[\"CC\", \"CC\"]]\n self.weight = [[1,1],[1,1],[1,1]]\n self.radiusHisto = [[3,3],[3,3],[3,3]]\n self.transformationModel = [\"SyN[0.1]\", \"SyN[0.1]\", \"SyN[0.1]\"]\n self.regularization = [\"Gauss[2,1]\", \"Gauss[2,1]\", \"Gauss[2,1]\"]\n self.iterations = [\"100x100x100x0\", \"100x100x100x20\", \"100x100x100x100\"]\n self.useMask = [False, True, True]\n self.memoryRequired = [0.177, 1.385e-7, 2.1e-7]",
"def __init__(self, base_url=BASE_URL):\n self.base_url = base_url",
"def __init__(self, settings_file_name = None, working_directory = None):\n super().__init__(settings_file_name, working_directory)\n \n self.file_name = self.get_setting(\"directory_file_name\")\n try:\n # Read file of services into a dictionary\n with open(os.path.join(self.working_directory, self.file_name), \"r\") as file:\n data = file.read()\n self.services = json.loads(data)\n except:\n # File of services does not exist, so create it an empty dictionary and save it to the file\n self.services = dict()\n data = json.dumps(self.services)\n with open(os.path.join(self.working_directory, self.file_name), \"w\") as file:\n file.write(data)",
"def __init__(self, *args, **kwargs):\n super(FioBase, self).__init__(*args, **kwargs)\n self.fio_cmd = None\n self.processes = None\n self.manager = None",
"def __init__(self, name, *args, **kwargs):\n self.name = name\n self.path = os.path.join(KIM_SCHEMAS_DIR,name+'.json')\n super(Schema,self).__init__(self.path,flag='r',*args,**kwargs)",
"def _inject_common_parameters(self) -> None:\n if not self.parameters.get(\"environment\"):\n self.parameters[\"environment\"] = self.__ctx.env.name\n if not self.parameters.get(\"region\"):\n self.parameters[\"region\"] = self.region",
"def setup(self):\n # Call the base class setup first so that all of the variables are fully initialized and formatted.\n super().setup()\n\n # Write out the custom config\n self.writeCustomConfig()",
"def __init__(self, HOST='0.0.0.0', PORT='8080', ROOT='services/http'):\r\n self.__dict__['HOST'] = {\"value\": HOST, \"required\": True, \"description\": \"The example module name\"}\r\n self.__dict__['PORT'] = {\"value\": PORT, \"required\": True, \"description\": \"Your personal name\"}\r\n self.__dict__['ROOT'] = {\"value\": ROOT, \"required\": True, \"description\": \"Your personal name\"}",
"def __init__(self, sm_params, name=None, io_connection=None, io_type=None, variant=None, initial_state=None):\n sm_params = sm_params.copy()\n self._use_proxy_pc = self._is_proxy_pc_in_sm_params(sm_params, Scpi.proxy_pc)\n initial_state = initial_state if initial_state is not None else Scpi.scpi\n super(Scpi, self).__init__(sm_params=sm_params, name=name, io_connection=io_connection, io_type=io_type,\n variant=variant, initial_state=initial_state)\n self.logger = logging.getLogger('moler.scpi')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
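A self-contained sketch of the pattern described above: the base class sets self.name (possibly to nothing), and the subclass only derives destination paths when a name exists. The constants, the paths and the minimal Base class are assumptions made for this sketch, not the package's real values.

import os

# Assumed stand-ins for the package's constants module and base class.
SYSTEMD_SVC_PATH = '/lib/systemd/system'
SYSTEMD_ENV_PATH = '/etc/sysconfig'


class Base(object):
    def __init__(self, logger=None, **params):
        self.logger = logger
        # `name` may legitimately be absent, e.g. when only retrieving the
        # status of all services under the init system.
        self.name = params.get('name')


class SystemD(Base):
    def __init__(self, logger=None, **params):
        super(SystemD, self).__init__(logger=logger, **params)
        if self.name:
            self.svc_file_dest = os.path.join(
                SYSTEMD_SVC_PATH, self.name + '.service')
            self.env_file_dest = os.path.join(SYSTEMD_ENV_PATH, self.name)


print(SystemD(name='myapp').svc_file_dest)  # /lib/systemd/system/myapp.service
print(hasattr(SystemD(), 'svc_file_dest'))  # False - no name, no destination paths
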
Generate service files and return a list of them. Note that env var names will be capitalized using a Jinja filter. This is template dependent. Even though a param might be named `key` and have value `value`, it will be rendered as `KEY=value`. We retrieve the names of the template files and set the paths where the generated files will be deployed. These are files a user can just take and use. If the service is also installed, those files will be moved to the relevant location on the system. Note that the parameters required to generate the files are propagated automatically, which is why we don't pass them explicitly to the generating function. `self.template_prefix` and `self.generate_into_prefix` are set in `base.py`. `self.files` is an automatically generated list of the files created during the process. It should be returned so that the generated files can be printed out for the user.
|
def generate(self, overwrite=False):
super(SystemD, self).generate(overwrite=overwrite)
self._validate_init_system_specific_params()
svc_file_template = self.template_prefix + '.service'
env_file_template = self.template_prefix
self.svc_file_path = self.generate_into_prefix + '.service'
self.env_file_path = self.generate_into_prefix
self.generate_file_from_template(svc_file_template, self.svc_file_path)
self.generate_file_from_template(env_file_template, self.env_file_path)
return self.files
|
[
"def create_files(self) -> None:\n data = self.data()\n for file in sorted(self.files):\n logger.debug(\n \"node(%s) service(%s) template(%s)\", self.node.name, self.name, file\n )\n rendered = self._get_rendered_template(file, data)\n file_path = Path(file)\n self.node.create_file(file_path, rendered)",
"def get_files_to_generate(self):\r\n pass",
"def services_file(path):\n return []",
"def setup_service_config_files(self):\n config_files = list()\n deploy_configs = GCP_SERVICE_CONFIG_MAP[self.deploy_type]\n\n for key, service in deploy_configs.items():\n\n if self.deploy_sub_type in service:\n config = service[self.deploy_sub_type]\n else:\n config = service['default']\n\n # check to see if we are deploying this service.\n if service['type'] == 'service' and key not in self.services:\n continue\n\n config_file = self.write_config_file(key, config, service.get('config_file', None))\n config_files.append(config_file)\n\n return config_files",
"def test_service_generate_file(self) -> None:\n self._config.namespace = 'n1'\n self._config.service = 's1'\n self._config.color = 'c1'\n self._config.data_store_exec = self._get_runnable_cmd(\n 0, validate_templates({\n 'schema-version': 'v1',\n 'document-version': 'x',\n 'gateway-templates': [],\n 'service-templates': [{\n 'namespace': 'n1',\n 'service': 's1',\n 'color': 'c1',\n 'index': 199,\n 'purpose': 'out-1.txt',\n 'template': '1 {{schema-version}} 2 {{has_admin_port}} 3 {{has_clusters}} 4',\n }, {\n 'namespace': 'n1',\n 'service': 's1',\n 'color': 'c1',\n 'index': 199,\n 'purpose': 'out-2.txt',\n 'template': 'z {{schema-version}} y',\n }],\n })\n )\n self._config.discovery_map_exec = self._get_runnable_cmd(0, validate_discovery_map({\n 'schema-version': 'v1',\n 'document-version': 'd12',\n 'namespaces': [{\n 'namespace': 'n1',\n 'network-id': 'nk1',\n 'gateways': {'instances': [], 'prefer-gateway': True, 'protocol': 'http2'},\n 'service-colors': [{\n 'service': 's1',\n 'color': 'c1',\n 'index': 199,\n 'routes': [],\n 'instances': [],\n 'namespace-egress': [],\n }],\n }],\n }))\n\n gateway = generate.GenerateServiceConfiguration(self._config)\n gateway.generate_file(3, 4)\n\n out_file_1 = os.path.join(self._config.envoy_config_dir, 'out-1.txt')\n self.assertTrue(os.path.isfile(out_file_1))\n out_file_2 = os.path.join(self._config.envoy_config_dir, 'out-2.txt')\n self.assertTrue(os.path.isfile(out_file_2))\n\n with open(out_file_1, 'r') as f:\n self.assertEqual('1 v1 2 True 3 False 4', f.read())\n\n with open(out_file_2, 'r') as f:\n self.assertEqual('z v1 y', f.read())",
"def generate_files_for_template(env, template_file, in_files, opts,\n out_dir, emboss_path):\n # Open template\n with open(template_file, \"r\") as template_contents:\n template_object = env.from_string(template_contents.read())\n _, template_extension = os.path.splitext(template_file)\n\n # Create output dir\n if not os.path.exists(out_dir):\n try:\n os.makedirs(out_dir)\n except OSError:\n print(\"Could not make output directory\", out_dir)\n sys.exit(1)\n\n for peripheral in in_files:\n generate_source_file(\n template_object,\n peripheral,\n opts,\n template_extension,\n out_dir,\n emboss_path\n )",
"def generate_files(self) -> List[Tuple[str, str, str]]:\n raise NotImplementedError() # NOQA",
"def _create_service_template(self):\n cmd = self._generate_cmd_and_expected_status()\n service_template = copy.deepcopy(self.service_template)\n service_template['container']['command'] = '{} {}'.format(cmd, random.randint(10, 30))\n return service_template",
"def get_templates(self) -> dict[str, str]:\n templates = {}\n for file in self.files:\n file_path = Path(file)\n template_path = get_template_path(file_path)\n if file in self.custom_templates:\n template = self.custom_templates[file]\n template = self.clean_text(template)\n elif self.templates.has_template(template_path):\n template = self.templates.get_template(template_path).source\n else:\n try:\n template = self.get_text_template(file)\n except Exception as e:\n raise ConfigServiceTemplateError(\n f\"node({self.node.name}) service({self.name}) file({file}) \"\n f\"failure getting template: {e}\"\n )\n template = self.clean_text(template)\n templates[file] = template\n return templates",
"def service_templates(self):\n return set(\n [t for t in self.list_templates() if '$service/' in t]\n )",
"def generate_pyfiles(self):\n _add = self.data.append\n # Create structure for render ouput\n to_generate = [\n ['__init__.py', ''],\n ['views.py', self.generate_views],\n ['urls.py', self.generate_routes],\n ['admin.py', self.generate_admin],\n ['models.py', self.generate_models],\n ['forms.py', self.generate_model_forms],\n ['model_factories.py', self.generate_model_factories],\n ['tests.py', self.generate_tests],\n ['extra_settings.py', self.generate_settings]\n ]\n for item in to_generate:\n filename, output = item\n output = output() if hasattr(output, '__call__') else output\n _add({'file': filename, 'output': output})\n\n # Save all rendered output as new files for the app\n for rendered in self.data:\n self.save(rendered['output'], rendered['file'])",
"def get_files_to_deploy(self) -> List[FileToDeploy]:",
"def generate_requests(self):\n # load SFCs from specified JSON file\n with open(self.request_path, 'rb') as file:\n requests = json.load(file)\n\n def parse_vnfs(vnfs): return [tuple(vnf.values()) for vnf in vnfs]\n\n # create SFC objects with parameters specified in the JSON file\n req = []\n for sfc in requests:\n vnfs = parse_vnfs(sfc.pop('vnfs'))\n sfc = ServiceFunctionChain(vnfs=vnfs, **sfc)\n\n req.append(sfc)\n\n return req",
"def list(sort_by, descending, model_storage, logger):\n\n logger.info('Listing all service templates...')\n service_templates_list = model_storage.service_template.list(\n sort=utils.storage_sort_param(sort_by, descending))\n\n column_formatters = \\\n dict(description=table.trim_formatter_generator(DESCRIPTION_FIELD_LENGTH_LIMIT))\n table.print_data(SERVICE_TEMPLATE_COLUMNS, service_templates_list, 'Service templates:',\n column_formatters=column_formatters)",
"def test_service_generate_file__no_match(self) -> None:\n self._config.namespace = 'n1'\n self._config.service = 's1'\n self._config.color = 'c1'\n self._config.data_store_exec = self._get_runnable_cmd(\n 0, validate_templates({\n 'schema-version': 'v1',\n 'document-version': 'x',\n 'gateway-templates': [],\n 'service-templates': [{\n 'namespace': 'n1',\n 'service': 's1',\n 'color': 'c1',\n 'index': 199,\n 'purpose': 'out-1.txt',\n 'template': '1 {{schema-version}} 2 {{has_admin_port}} 3 {{has_clusters}} 4',\n }],\n })\n )\n self._config.discovery_map_exec = self._get_runnable_cmd(0, validate_discovery_map({\n 'schema-version': 'v1',\n 'document-version': 'd12',\n 'namespaces': [],\n }))\n\n gateway = generate.GenerateServiceConfiguration(self._config)\n res = gateway.generate_file(3, 4)\n self.assertEqual(1, res)",
"def test_service_get_templates__no_templates(self) -> None:\n self._config.namespace = 'n1'\n self._config.service = 's1'\n self._config.color = 'c1'\n self._config.data_store_exec = self._get_runnable_cmd(\n 0, {\n 'schema-version': 'v1',\n 'document-version': 't1',\n 'gateway-templates': [],\n 'service-templates': [],\n },\n )\n gateway = generate.GenerateServiceConfiguration(self._config)\n templates = gateway.get_templates()\n self.assertEqual({}, templates)",
"def gen_service_urls():\n base_url = common_bits.base_url\n all_base_service_urls = []\n service_urls = ['ah', 'any', 'esp', 'group', 'icmp', 'icmpv6', 'ip', 'tcp', 'tcpudp', 'udp']\n for item in service_urls:\n base_service_url = '{}/service/{}'.format(base_url,item)\n all_base_service_urls.append(base_service_url)\n\n return all_base_service_urls",
"def list(service_template_name,\n sort_by,\n descending,\n model_storage,\n logger):\n if service_template_name:\n logger.info('Listing services for service template {0}...'.format(\n service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n filters = dict(service_template=service_template)\n else:\n logger.info('Listing all services...')\n filters = {}\n\n services_list = model_storage.service.list(\n sort=utils.storage_sort_param(sort_by=sort_by, descending=descending),\n filters=filters)\n table.print_data(SERVICE_COLUMNS, services_list, 'Services:')",
"def find_services(self, fileset):\n services = []\n self.service_files = {}\n self.files = []\n for pfile in fileset.proto_file:\n self.files.append(pfile)\n for service in pfile.service:\n self.service_files[service.name] = pfile\n services.append(service)\n return services"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
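The env-file behaviour described above (a param named key with value value rendered as KEY=value) can be reproduced with a few lines of Jinja2; the template string below is illustrative and not the package's actual template.

import jinja2

# Illustrative env-file template: upper-case each parameter name with the
# built-in `upper` filter, giving exactly the KEY=value shape described above.
template = jinja2.Environment().from_string(
    '{% for name, value in env_vars.items() %}'
    '{{ name|upper }}={{ value }}\n'
    '{% endfor %}'
)

print(template.render(env_vars={'key': 'value', 'port': '8080'}), end='')
# KEY=value
# PORT=8080
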
Install the service on the local machine. This is where we deploy the service files to their relevant locations and perform any other required actions to configure the service and make it ready to be `start`ed.
|
def install(self):
super(SystemD, self).install()
self.deploy_service_file(self.svc_file_path, self.svc_file_dest)
self.deploy_service_file(self.env_file_path, self.env_file_dest)
sh.systemctl.enable(self.name)
sh.systemctl('daemon-reload')
|
[
"def install_service():\n result = CliRunner().invoke(cli, args=args.COMMON_ARGS + [home, defs.SERVICE,\n commands.INSTALL, service_path])\n sanity_check(result, home)\n assert os.path.exists(os.path.join(home, defs.SERVICES, service_name, \"{}_service.py\".format(service_name)))",
"def install_service(service):\n sudo(\"systemctl enable %s\" % service, warn_only=True)\n sudo(\"systemctl start %s\" % service, warn_only=True)",
"def InstallService(self, entry):\n if entry.get('status') == 'on':\n cmd = \"start\"\n elif entry.get('status') == 'off':\n cmd = \"stop\"\n return self.cmd.run(self.get_svc_command(entry, cmd)).success",
"def install_dependencies_on_host(service):\n click.echo('Installing service {0} dependencies on host.'.format(service))\n service = get_service(service)\n service.install_dependencies_on_host()",
"def service_start(svc):\n # TODO Change to subprocess\n system('systemctl daemon-reload')\n system('systemctl start {}'.format(svc))",
"def _setup_master_service(self):\n logger.debug('Installing Lithops in {}'.format(self.backend.master))\n ssh_client = self.backend.master.get_ssh_client()\n\n src_proxy = os.path.join(os.path.dirname(__file__), 'worker.py')\n create_handler_zip(LOCAL_FH_ZIP_LOCATION, src_proxy)\n current_location = os.path.dirname(os.path.abspath(__file__))\n controller_location = os.path.join(current_location, 'master.py')\n\n logger.debug('Uploading lithops files to {}'.format(self.backend.master))\n files_to_upload = [(LOCAL_FH_ZIP_LOCATION, '/tmp/lithops_standalone.zip'),\n (controller_location, '/tmp/master.py'.format(STANDALONE_INSTALL_DIR))]\n ssh_client.upload_multiple_local_files(files_to_upload)\n os.remove(LOCAL_FH_ZIP_LOCATION)\n\n vm_data = {'instance_name': self.backend.master.name,\n 'ip_address': self.backend.master.ip_address,\n 'instance_id': self.backend.master.instance_id}\n\n script = get_master_setup_script(self.config, vm_data)\n\n logger.debug('Executing lithops installation process on {}'.format(self.backend.master))\n logger.debug('Be patient, initial installation process may take up to 5 minutes')\n ssh_client.run_remote_command(script)\n logger.debug('Lithops installation process completed')",
"def start(self) -> None:\n logger.info(\"node(%s) service(%s) starting...\", self.node.name, self.name)\n self.create_shadow_dirs()\n self.create_dirs()\n self.create_files()\n wait = self.validation_mode == ConfigServiceMode.BLOCKING\n self.run_startup(wait)\n if not wait:\n if self.validation_mode == ConfigServiceMode.TIMER:\n self.wait_validation()\n else:\n self.run_validation()",
"def install(self):\n # The package structure for LiteServ is different pre 1.4. Handle for this case\n if has_dot_net4_dot_5(self.version_build):\n directory_path = \"couchbase-lite-net-msft-{}-liteserv/net45/LiteServ.exe\".format(self.version_build)\n else:\n directory_path = \"couchbase-lite-net-msft-{}-liteserv/LiteServ.exe\".format(self.version_build)\n\n status = self.ansible_runner.run_ansible_playbook(\"install-liteserv-windows.yml\", extra_vars={\n \"directory_path\": directory_path\n })\n\n if status != 0:\n raise LiteServError(\"Failed to install Liteserv on Windows host\")",
"def _setup_server_infra(self):\r\n server = self._conf_server\r\n self._service = 'vsftpd'\r\n\r\n #configure ftp control port on ftp server\r\n if self.ctr_port != 21 :\r\n server.exec_command(\"sed -i '/listen_port/d' /etc/vsftpd.conf\")\r\n server.exec_command(\"sed -i '/isten=/a\\listen_port={}' /etc/vsftpd.conf\".format(self.ctr_port))\r\n output =server.exec_command('grep listen_port /etc/vsftpd.conf')\r\n server.service_stop(self._service)\r\n server.service_start(self._service)\r\n\r\n self._file_info = server.create_file(size=self._file_size)\r\n self._local_file = self._local_file()\r\n logger.info('Created file: {}'.format(self._file_info))\r\n\r\n times = 0\r\n sleeptime = 0.5\r\n service_status = server.service_running(self._service)\r\n while not service_status and times <= 3:\r\n try:\r\n server.service_start(self._service)\r\n except ValueError as e:\r\n service_status = False\r\n else:\r\n service_status = True\r\n break\r\n time.sleep(sleeptime)\r\n times += 1\r\n\r\n if not service_status:\r\n raise ValueError(e)\r\n\r\n logger.info('Service {} is running'.format(self._service))",
"def deploy(service):\n click.echo('Deploying service {0}.'.format(service))\n service = get_service(service)\n service.deploy()",
"def start():\n app_svc = AppService.create(__package__)\n\n db_path = app_svc.config_svc[\"archive_db_path\"]\n init_db(db_path)\n\n app_svc.start(routes(config=app_svc.config_svc))",
"def install(self):\n\n # Install the host installation.\n if self.install_host_from_source:\n self._install_host_from_source()\n\n # Install the target installation.\n if self.install_from_source:\n self._install_target_from_source()\n else:\n self._install_target_from_existing_windows_version()",
"def installMaster(self):\n self.util.execRemoteScript(\"ipnat.sh\", [self.libvirt.host])\n self.util.execRemoteScript(\"fuelInstall.sh\", [self.util.remoteDir])",
"def run_service(service):\n service.start()\n service.wait()",
"def install(self):\n\t\tc = Common()\n\t\tc.banner()\n\t\tc.client_hosts()\n\n\t\thostname = Hostname()\n\t\thostname.check()\n\n\t\tl = Logger(c.client_name())\n\n\t\treader = open(self.logfile)\n\t\tstartline = len(reader.readlines())\n\n\t\tl.event_counter(startline)\n\n\t\ttry:\n\t\t\toperatingSystem = run(\"/bin/cat /etc/issue | /usr/bin/awk '{print $1}'\")\n\n\t\t\tif(operatingSystem=='Debian'):\n\t\t\t\trun('aptitude -y update && aptitude -y install puppet')\n\t\t\telse:\n\t\t\t\tprint '--->\\tOS not supported'\n\t\t\t\tsys.exit(0)\n\n\t\t\tpuppetthread = AddCert()\n\t\t\tpuppetthread.start()\n\n\t\t\tpuppetthread.join()\n\n\t\texcept Exception, e:\n\t\t\tprint 'error :', e\n\n\t\texit(0)",
"def start_services(self):",
"def set_service(self, service):\n SHIM_LOGGER.info('Starting service')\n self.request_queue = service.request_queue\n\n self.service = service\n self._request('init')",
"def install_initd_svc(self, name, script_path=None, script_data=None):\n if self.get_start_system() != osutil.START_INITD:\n raise errors.EnvError('Cannot install init.d service in non-init.d environment')\n\n if script_data is None and script_path is None:\n raise ValueError('Bot script path and data are None')\n\n if script_data is None:\n with open(script_path, 'r') as fh:\n script_data = fh.read()\n\n initd_path = os.path.join('/etc/init.d', name)\n if os.path.exists(initd_path):\n os.remove(initd_path)\n self.audit.audit_delete(initd_path)\n\n with util.safe_open(initd_path, mode='w', chmod=0o755) as handle:\n handle.write(script_data)\n self.audit.audit_file_write(initd_path)\n\n ret = self.exec_shell('sudo chkconfig --add %s' % util.escape_shell(name))\n if ret != 0:\n raise errors.SetupError('Error: Could not reload systemctl, code: %s\\n' % ret)\n\n return 0",
"def deploy():\n test()\n require('hosts', provided_by=servers)\n require('path')\n env.release = time.strftime('%Y-%m-%d-%H-%M')\n upload_tar_from_git()\n install_requirements()\n install_site()\n symlink_current_release()\n migrate()\n collect_static()\n restart_webserver()\n remove_remote_package()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
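A rough standard-library rendition of the install step above, using shutil and subprocess instead of the sh module; the paths and service name are whatever the caller supplies, and actually running it requires root on a systemd host.

import shutil
import subprocess


def install(name, svc_file_path, env_file_path, svc_file_dest, env_file_dest):
    # Deploy the generated unit and environment files to their destinations.
    shutil.copyfile(svc_file_path, svc_file_dest)
    shutil.copyfile(env_file_path, env_file_dest)
    # Enable the unit and make systemd pick up the newly deployed files.
    subprocess.check_call(['systemctl', 'enable', name])
    subprocess.check_call(['systemctl', 'daemon-reload'])
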
Uninstall the service. This is supposed to perform any cleanup operations required to remove the service: files, links, and whatever else should be removed. This method should also run when implementing cleanup in case of failure. As such, idempotence should be considered.
|
def uninstall(self):
sh.systemctl.disable(self.name)
sh.systemctl('daemon-reload')
if os.path.isfile(self.svc_file_dest):
os.remove(self.svc_file_dest)
if os.path.isfile(self.env_file_dest):
os.remove(self.env_file_dest)
|
[
"def _uninstall_service(service_name) -> None:\n if service_name.startswith('/'):\n service_name = service_name[1:]\n # Note service-names *cannot* have underscores in them.\n service_name = service_name.replace(\"_\", \"-\")\n log.info(\"Uninstalling {}.\".format(service_name))\n sdk_install.uninstall(config.PACKAGE_NAME,\n service_name)",
"def async_unregister(self) -> None:\n LOGGER.debug(\n \"Unregistering Spook service: %s.%s\",\n self.domain,\n self.service,\n )\n\n self.hass.services.async_remove(self.domain, self.service)",
"def _CleanupInstall(self):\n logging.info(\"Stoping service %s.\", self.service_name)\n _VerboseCheckCall([\"sc\", \"stop\", self.service_name])\n\n msiexec_args = [\n \"msiexec\",\n \"/q\",\n \"/x\",\n glob.glob(os.path.join(args.output_dir, \"dbg_*_amd64.msi\"))\n .pop()\n .replace(\"/\", \"\\\\\"),\n ]\n _VerboseCheckCall(msiexec_args)",
"async def stop(self):\n sv_type = \"service\" if self.depth < 2 else \"sub-service\"\n self.logger.debug(self.indented(f\"Stopping {sv_type} {self.name}.\"))\n\n # Stop the sub-services\n for name, service in tuple(self.services.items()):\n await service.stop()\n del self.services[name]\n\n await self.cleanup()\n self.logger.debug(self.indented(f\"... {sv_type} stopped.\"))\n self.started = False",
"def remove_service(self, id):\n if not id in self.services:\n raise PluginError(\"Service not found: {0}\".format(id))\n\n logger.debug(\"Unregistering service: {}\".format(id))\n del self.services[id]",
"def destory_service(self, sid):\n _svc = self.get_service(sid)\n if _svc:\n if isinstance(_svc, ServiceWraperInProcess):\n _svc.terminate()\n else:\n warnings.warn('destory just available for ServiceWraperInProcess!')",
"def _uninstall(self):\n self.log.info('Uninstalling \"{schema}\"'.format(**self.env))\n with higher_log_indent():\n self._unlink()\n self._delete()",
"def service_delete(container, sysdir=constants.SYSTEMD_DIR, log=None):\n log = log or common.configure_logging(__name__)\n # prefix is explained in the service_create().\n service = 'tripleo_' + container\n\n sysd_unit_f = service + '.service'\n sysd_health_f = service + '_healthcheck.service'\n sysd_timer_f = service + '_healthcheck.timer'\n sysd_health_req_d = sysd_unit_f + '.requires'\n sysd_health_req_f = sysd_health_req_d + '/' + sysd_timer_f\n for sysd_f in sysd_health_req_f, sysd_unit_f, sysd_health_f, sysd_timer_f:\n if os.path.isfile(sysdir + sysd_f):\n log.debug('Stopping and disabling systemd service for %s' %\n service)\n sysd_unit = os.path.basename(sysd_f)\n try:\n subprocess.check_call(['systemctl', 'stop', sysd_unit])\n subprocess.check_call(['systemctl', 'disable', sysd_unit])\n except subprocess.CalledProcessError:\n log.exception(\"systemctl failed\")\n raise\n log.debug('Removing systemd unit file %s' % sysd_f)\n if os.path.exists(sysdir + sysd_f):\n os.remove(sysdir + sysd_f)\n try:\n subprocess.check_call(['systemctl', 'daemon-reload'])\n except subprocess.CalledProcessError:\n log.exception(\"systemctl failed\")\n raise\n else:\n log.info('No systemd unit file was found for %s' % sysd_f)\n\n if os.path.exists(os.path.join(sysdir, sysd_health_req_d)):\n log.info('Removing %s.requires' % service)\n os.rmdir(os.path.join(sysdir, sysd_health_req_d))",
"def remove(self):\n bundle_id = \"com.couchbase.LiteServ-iOS\"\n if self.storage_engine == \"SQLCipher\":\n bundle_id = \"com.couchbase.LiteServ-iOS-SQLCipher\"\n\n log_info(\"Removing LiteServ\")\n\n self.stop()\n\n # Stop the simulator\n log_info(\"device_id: {}\".format(self.device_id))\n output = subprocess.check_output([\n \"killall\", \"Simulator\"\n ])\n\n # Erase the simulator\n output = subprocess.check_output([\n \"xcrun\", \"simctl\", \"erase\", self.device_id\n ])\n\n if bundle_id in output:\n raise LiteServError(\"{} is still present after uninstall\".format(bundle_id))",
"def cleanup_services(self):\n services = self.list_services()\n\n for service in services:\n if service.namespace in self.managed_namespaces:\n service.delete()",
"def __stop_general_service(self, service_name):\n if service_name not in self.__service_processes:\n # no service to stop\n return\n try:\n self.__service_processes.pop(service_name).interrupt()\n except RuntimeError:\n # process already succeeded, race condition\n pass\n\n service = self.services.pop(service_name)\n for subservice in service.subservices:\n # deregister subservices\n self.__stop_general_service(subservice.name)",
"def stopService(self):\n Service.stopService(self)\n d = self._service.stopService()\n return d.addCallback(lambda _: self._client.disconnect())",
"def stop_services(self):",
"def destroy_magnum_service(self, magnum_service_id):",
"def delete_mgmt_service(self):\n return self._delete(\"service\", ApiService, api_version=6)",
"def unregisterService(self, caller_id, service, service_api):\n try:\n self.ps_lock.acquire()\n retval = self.reg_manager.unregister_service(service, caller_id, service_api)\n if 0: #TODO\n self._notify_service_update(service, service_api)\n mloginfo(\"-SERVICE [%s] %s %s\", service, caller_id, service_api)\n finally:\n self.ps_lock.release()\n\n if retval[2] == 0:\n return retval\n\n if not self._blacklisted_service(service):\n args = (caller_id, service, service_api)\n if self.sd is not None:\n remote_master_uri = self.sd.get_remote_services().values()\n if len(remote_master_uri) > 0:\n print 'Remote unregisterService(%s, %s, %s)' % args\n for m in remote_master_uri:\n print '... on %s' % m\n master = xmlrpcapi(m)\n code, msg, val = master.remoteUnregisterService(*args)\n if code != 1:\n logwarn(\"unable to unregister service [%s] with master %s: %s\"%(service, m, msg))\n \n return retval",
"def teardown():\n service_id = service[\"id\"]\n log.info(f\"Deleting service with id {service_id}\")\n delete_response = pagerduty_api.delete(f\"services/{service_id}\")\n msg = f\"Deletion of service {service_id} failed\"\n assert delete_response.ok, msg",
"def advapi32_DeleteService(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hService\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def unregister(self):\n if self.info:\n self.zeroconf.unregister_service(self.info)\n self.zeroconf.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
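The cleanup above can be expressed with the standard library as well; the point of the sketch is the idempotence the description calls for, so every step tolerates being run when there is nothing left to undo.

import os
import subprocess


def uninstall(name, svc_file_dest, env_file_dest):
    # `call` rather than `check_call`: disabling an already-removed unit
    # should not abort the cleanup path.
    subprocess.call(['systemctl', 'disable', name])
    subprocess.call(['systemctl', 'daemon-reload'])
    for path in (svc_file_dest, env_file_dest):
        if os.path.isfile(path):
            os.remove(path)
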
Return a list of the statuses of the `name` service, or, if name is omitted, a list of the statuses of all services for this specific init system. There should be a standard for the status fields; there currently isn't one. `self.services` is set in `base.py`.
|
def status(self, name=''):
super(SystemD, self).status(name=name)
svc_list = sh.systemctl('--no-legend', '--no-pager', t='service')
svcs_info = [self._parse_service_info(svc) for svc in svc_list]
if name:
names = (name, name + '.service')
# return list of one item for specific service
svcs_info = [s for s in svcs_info if s['name'] in names]
self.services['services'] = svcs_info
return self.services
|
[
"def services(self):\n _log.debug('get service list')\n result = self._requestJSON('services', '')\n return self._getKey(result, 'name')",
"def get_service_statuses():\n\n # We'll collect the statuses for the service in a list.\n # Note: increasing the \"minutes\" value will reduce the chances of an\n # getting no status, but also potentially might give a late result\n client = get_monasca_client()\n parms = {\n \"name\": \"http_status\",\n \"start_time\":\n (datetime.utcnow() - timedelta(minutes=1)).isoformat(),\n \"group_by\": \"service\"\n }\n\n measurements = None\n try:\n measurements = client.metrics.list_measurements(**parms)\n if not measurements:\n LOG.error(\"Empty measurements from Monasca\")\n abort(404, \"Unable to retrieve any statuses\")\n except Exception as e:\n LOG.error(\"Unable to access Monasca: %s\" % e)\n abort(503, \"Monasca service unavailable\")\n\n statuses = []\n for m in measurements:\n service = m['dimensions']['service']\n # we get the last measurement value, which is also the latest\n val_idx = m['columns'].index('value')\n if not m['measurements']:\n status = \"unknown\"\n else:\n value = m['measurements'][-1][val_idx]\n if value == 0:\n status = \"up\"\n else:\n status = \"down\"\n statuses.append({\n 'name': service,\n 'status': status\n })\n\n return jsonify(statuses)",
"def get_availables_services(self):\r\n self._service_locator.get_availables_services()",
"def test_legacy_list_service_status(self):\n\n # Set the policy file as this is an admin-only API\n self.policy({'find_service_statuses': '@'})\n\n response = self.client.get('/service_status/')\n\n # Check the headers are what we expect\n self.assertEqual(200, response.status_int)\n self.assertEqual('application/json', response.content_type)\n\n # Check the body structure is what we expect\n self.assertIn('service_statuses', response.json)\n self.assertIn('links', response.json)\n self.assertIn('self', response.json['links'])\n\n # Test with 0 service_statuses\n # Seeing that Central is started there will be 1 here already..\n self.assertEqual(0, len(response.json['service_statuses']))\n\n data = [self.update_service_status(\n hostname=\"foo%s\" % i, service_name=\"bar\") for i in range(0, 10)]\n\n self._assert_paging(data, '/service_status', key='service_statuses')",
"def service_names(self):\n return self.services.keys()",
"def get_services(self):\n\n # try to get services\n try:\n\n # get services\n command = str('kubectl get services')\n subprocess.call(command.split())\n\n # handle exception\n except:\n\n # raise Exception\n raise Exception('I could not get the list of services')",
"def service_status(service_name):\n try:\n service_definition = services[service_name]\n running_resources = docker.count_containers_by_image(\n service_definition['image'])\n response = {}\n response['running_resources'] = running_resources\n return response\n except KeyError:\n abort(501, 'Undefined service: %s.' % service_name)",
"def getServicesInfo(self):\n res = self.serv.getServicesInfo()\n return res",
"def list_services(ctx):\n\n ctx.respond(ctx._(\"I am running: {services}\").format(\n services=\", \".join(ctx.bot.services))\n )",
"def services(self):\n return self.__services",
"def advapi32_EnumServicesStatus(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hSCManager\", \"dwServiceType\", \"dwServiceState\", \"lpServices\", \"cbBufSize\", \"pcbBytesNeeded\", \"lpServicesReturned\", \"lpResumeHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_services(self, names: typing.List[str] = None) -> typing.List[ServiceInfo]:\n query = None\n if names is not None:\n query = {'names': ','.join(names)}\n resp = self._request('GET', '/v1/services', query)\n return [ServiceInfo.from_dict(info) for info in resp['result']]",
"def get_services(self, service_type):\n with open(os.path.join(self.working_directory, self.file_name), \"r\") as file:\n data = file.read()\n self.services = json.loads(data)\n\n if service_type:\n return [s for s in self.services if s[0] == service_type]\n else:\n return [s for s in self.services]",
"def do_service_list(cs, args):\r\n result = cs.services.list(host=args.host, binary=args.binary)\r\n columns = [\"Binary\", \"Host\", \"Zone\", \"Status\", \"State\", \"Updated_at\"]\r\n # NOTE(jay-lau-513): we check if the response has disabled_reason\r\n # so as not to add the column when the extended ext is not enabled.\r\n if result and hasattr(result[0], 'disabled_reason'):\r\n columns.append(\"Disabled Reason\")\r\n if result:\r\n print 'OKKKKKKKKK'\r\n utils.print_list(result, columns)",
"def get_status (self, id):\n for status in self._services:\n if status.id == id:\n return status\n else:\n log.error(\"Service status for service: %s is missing!\" % id)",
"def _all_services(type_, *args, **kwargs):\n return all_srvs[type_]",
"def get_servicesinfo(ns):\n tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})\n\n # Firewall\n try:\n fw = ''\n firewalld = get_service(ns, 'firewalld')\n if firewalld and firewalld.Status == 'OK':\n fw = 'on (firewalld)'\n else:\n iptables = get_service(ns, 'iptables')\n if iptables and iptables.Status == 'OK':\n fw = 'on (iptables)'\n if not fw:\n fw = 'off'\n except Exception:\n fw = 'N/A'\n tf.produce_output([('Firewall:', fw)])\n\n # Logging\n try:\n logging = ''\n journald = get_service(ns, 'systemd-journald')\n if journald and journald.Status == 'OK':\n logging = 'on (journald)'\n else:\n rsyslog = get_service(ns, 'rsyslog')\n if rsyslog and rsyslog.Status == 'OK':\n logging = 'on (rsyslog)'\n if not logging:\n logging = 'off'\n except Exception:\n logging = 'N/A'\n tf.produce_output([('Logging:', logging)])\n\n return []",
"def getServiceNames(self):\n self.send_getServiceNames()\n return self.recv_getServiceNames()",
"def get_services(self):\n xpath = [\"Services\", \"Service\"]\n return self.find_anywhere(xpath)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return True if the init system exists and False if not.
|
import os
def is_system_exists():
    # Assumed fix for the original self-recursion: the presence of PID 1's /proc entry implies a running init system.
    return os.path.exists('/proc/1')
|
[
"def _is_installed(self):\n return self._system.exists(os.path.join(self.get_install_path(), \"bin/root\"))",
"def _is_system_installed( self ):\n return self._system.test_library(self._library, self._headers)",
"def is_installed() -> bool:\n if platform.system() in (\"Linux\", \"Darwin\"):\n return shutil.which(CMD) is not None\n return Path(DEFAULT_WIN_OPENSCAD_PATH).exists()",
"def is_installed(self):\n return False",
"def check_state(self):\n # First see if installed on the system\n if self._config is not None:\n result = self._system.execute(\"which\", [self._config])\n logger.debug(\"Command which gives config location at %r\" % result[1])\n if result[1] is not None and result[1] != \"\" and result[1] == \"\\n\":\n output = self._system.execute(self._config, ['--libs'])\n self._libraries = output[1].strip('\\n').split()\n output = self._system.execute(self._config, ['--cflags'])\n self._flags = output[1].strip('\\n').split()\n\n if self._system.compilation_test(self._headers, self._libraries + self._flags):\n self._installed = True\n \n # Not on system so set a local install path\n self._install_path = os.path.join(self._system.get_install_path(), self._name)\n # Now check the local install folder\n if not self._installed:\n if self._is_installed():\n self._installed = True\n else:\n self._installed = False\n self._updated = False",
"def __is_installed() -> bool:\n try:\n check_call(\n [\"bash\", \"-c\", \"command -v keybase\"], stdout=DEVNULL, stderr=DEVNULL\n )\n return True\n except CalledProcessError:\n return False",
"def is_process_started_by_init():\n # The 'init' process has its PID set to 1.\n return os.getppid() == 1",
"def is_available(self):\n try:\n return self.joystick.get_init()\n except:\n return False",
"def _update_is_enabled():\n\n return Path('/etc/cloud/cloud-init-update.enabled').exists()",
"def is_initialized():\n return lib.PAPI_is_initialized()",
"def iswitch_initialized():\n return (path.isdir(get_iswitch_dir_path()) and\n path.islink(get_irods_config_path()) and\n os.readlink(get_irods_config_path()).startswith(get_iswitch_dir_path()+\"/\"))",
"def is_installed(self) -> bool:\n return True",
"def isin_system(self):\n return 'system' in self.flags",
"def checkElastixInitialized():\n \n global Initialized;\n \n if not Initialized:\n raise RuntimeError(\"Elastix not initialized: run initializeElastix(path) with proper path to elastix first\");\n #print ElastixSettings.ElastixBinary;\n\n return True;",
"def is_initialized(self) -> bool:\n return Path(self.plan_dir).exists() and self.is_git_repository()",
"def check_reboot():\n return os.path.exists(\"/run/reboot-required\")",
"def exists(_env):\n return True",
"def test_seattle_is_installed():\n \n if OS == \"Windows\" or OS == \"WindowsCE\":\n\n # Tests if Seattle is set to run at user login.\n # See comments in add_to_win_registry_Current_User_key() for details.\n try:\n Current_User_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,\n \"Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Run\",\n 0, _winreg.KEY_ALL_ACCESS)\n except WindowsError:\n pass\n else:\n Current_User_key_exists = search_value_in_win_registry_key(\n Current_User_key, \"seattle\")\n _winreg.CloseKey(Current_User_key)\n if Current_User_key_exists:\n return True\n\n # Tests if Seattle is set to run at machine startup.\n # See comments in add_to_win_registry_Local_Machine_key() for details.\n try:\n Local_Machine_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,\n \"Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Run\",\n 0, _winreg.KEY_ALL_ACCESS)\n except WindowsError:\n pass\n else:\n Local_Machine_key_exists = search_value_in_win_registry_key(\n Local_Machine_key, \"seattle\")\n _winreg.CloseKey(Local_Machine_key)\n if Local_Machine_key_exists:\n return True\n\n # If neither registry key is present, then test if there is a shortcut\n # to Seattle in the startup folder to determine if Seattle is installed.\n full_startup_file_path,file_path_exists = \\\n get_filepath_of_win_startup_folder_with_link_to_seattle()\n return file_path_exists\n\n elif OS == \"Linux\" or OS == \"Darwin\":\n\n # Check to see if Seattle is being installed on a Nokia tablet.\n #if platform.machine().startswith('armv'):\n # # The full path to the startup script.\n # startup_script_path = \"/etc/init.d/nokia_seattle_startup.sh\"\n # # The full path to the symlink.\n # symlink_path = \"/etc/rc2.d/S99startseattle\"\n # \n # # If the startup script or the symlink exist, then Seattle was installed.\n # return os.path.exists(startup_script_path) or \\\n # os.path.lexists(symlink_path)\n\n #else:\n # Check to see if the crontab has been modified to run seattle.\n crontab_contents_stdout,crontab_contents_stderr = \\\n subprocess.Popen([\"crontab\", \"-l\"], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n return get_starter_file_name() in crontab_contents_stdout\n\n else:\n raise UnsupportedOSError()",
"def needs_init(self):\n return (self._node_device_status and (self._node_device_status.tag == 'down'\n or self._node_device_status.tag == 'unready'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns True if mot1 is an anagram of mot2
|
def est_anagramme(mot1,mot2):
return sorted(mot1) == sorted(mot2)
|
[
"def is_anagram(word1, word2):\n return sorted(word1) == sorted(word2)",
"def is_anagram(word1, word2):\n \n word1_list = [i for i in word1.lower() if i != \" \"]\n word2_list = [j for j in word2.lower() if j != \" \"]\n \n word1_list.sort()\n word2_list.sort()\n \n return word1_list == word2_list\n pass",
"def is_it_an_anangram(input1, input2):\n sorted_word1 = sorted(input1)\n sorted_word2 = sorted(input2)\n\n if sorted_word1 == sorted_word2:\n print(\"Yipee, these words are anagrams!\")\n else:\n print(\"Hate to break it to ya, no anagram here\")\n # return (do I need return here?)",
"def anagrams_lst(str1: str, str2: str) -> bool:\n return sorted(str1) == sorted(str2)",
"def is_anagram(s, t):\r\n u = list(s)\r\n v = list(t)\r\n return u.sort() == v.sort()",
"def check_anagram(word):\r\n pass",
"def are_equiv(word1, word2):\r\n if word1.youngtableau().wort == word2.youngtableau().wort:\r\n return True\r\n else:\r\n return False",
"def one_char_different(word1, word2):\n strikes = 1 #three strikes policy, with guilt presumed until proven otherwise\n for i in xrange(len(word1)):\n if word1[i] != word2[i]:\n strikes += 1\n if strikes > 2:\n return False\n\n if strikes > 1:\n return True\n else:\n return False",
"def isAnagram(s, p):\n return sorted(s) == sorted(p)",
"def isAnagram(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n letter_counter = Counter(s)\n for ch in t:\n if ch not in letter_counter:\n return False\n else:\n if letter_counter[ch] <= 0:\n return False\n else:\n letter_counter[ch] -= 1\n return True",
"def is_anagram_of_pal(word):\n\n d = {}\n for char in word:\n if char not in d:\n d[char] = 1\n else:\n d[char] += 1\n\n check = 0\n for amt in d.values():\n if amt % 2 != 0:\n check += 1\n if check > 1:\n return False\n\n return True",
"def is_permutation_v2(string1, string2):\n\tstring1_dict = str_count_dict(string1)\n\tstring2_dict = str_count_dict(string2)\n\n\tif string1_dict == string2_dict:\n\t\treturn True\n\treturn False",
"def like(s1, s2):\n s1_normed = normalise(s1)\n for s in s2:\n if s in s1_normed:\n return True\n return False",
"def is_anagram_of_palindrome(word):\n\n letters = {}\n\n for char in word:\n # Load the counts of each char into the dictionary\n letters[char] = letters.get(char, 0) + 1\n\n odd = False\n\n for count in letters.values():\n if count % 2 != 0:\n if odd:\n return False\n odd = True\n\n return True",
"def isAnagram(s, p):\n counter = {}\n for c in s:\n counter[c] = counter.get(c, 0) + 1\n for c in p:\n if c not in counter:\n return False\n counter[c] -= 1\n if counter[c] < 0:\n return False\n return all(x == 0 for x in counter.values())",
"def check_anagram(s, t):\r\n\r\n # firstly rule out obvious unmatching cases\r\n if t==None:\r\n return None\r\n\r\n if len(t) > len(s):\r\n return False \r\n\r\n s = s.lower() # make all letters lowercase\r\n t = t.lower()\r\n\r\n sort_t = sorted(t) # merge sort t\r\n\r\n n = 0 # set counter\r\n for i in range(len(s)-len(t)+1): # scan the same length of t through s string\r\n slice = s[i:(i+len(t))]\r\n sort_slice = sorted(slice) # merge sort string slice from s\r\n if sort_slice == sort_t: # compare sorted strings\r\n return True\r\n n+=1\r\n break\r\n\telse:\r\n continue\t\r\n\r\n if n > 0:\r\n return True\r\n else:\r\n return False",
"def are_equivalent_atoms(gra, atm1_key, atm2_key, stereo=True, dummy=True):\n gra1 = set_atom_symbols(gra, {atm1_key: 'Ts'})\n gra2 = set_atom_symbols(gra, {atm2_key: 'Ts'})\n are_equiv = bool(isomorphism(gra1, gra2, stereo=stereo, dummy=dummy))\n return are_equiv",
"def checkScramble(word1, word2, useAll=False, onlyTiles=False):\n\tw1 = letterCount(word1)\n\tw2 = letterCount(word2)\n\tif useAll: #if the word must use all of the tiles\n\t\treturn w1 == w2\n\telse: #if the second word can be made from any number of the tiles\n\t\tfor k,v in w2.items():\n\t\t\tif onlyTiles: #if the word must only use any num of tiles\n\t\t\t\tif k not in w1.keys():\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif k not in w1.keys() or w2[k] > w1[k]:\n\t\t\t\t\treturn False\n\treturn True",
"def issimilar(letter1: str, letter2: str) -> bool:\n return letter1 == letter2 or any(\n [letter1 in x and letter2 in x for x in SIMILAR_LETTERS]\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the list of anagrams of mot
|
def anagrammes(mot, hashTable):
    li = hashTable.get(prehash(mot), [])
result = []
for m in li:
if est_anagramme(m, mot):
result.append(m)
return result
|
[
"def test_find_anagram_phrases(self):\n dict_file = os.path.abspath('tests/data/ch03/dictionary.txt')\n word_list = cleanup_dict(dict_file)\n word_list = cleanup_list_more(word_list)\n anagram_dict = anagram_generator.get_anagram_dict(word_list)\n # Test a word without anagrams.\n phrases, phrase = [], []\n anagram_generator.find_anagram_phrases(phrases, 'ttr', anagram_dict, phrase)\n self.assertListEqual([], phrases)\n # Test a phrase with four anagram phrases.\n anagram_generator.find_anagram_phrases(phrases, 'a cat', anagram_dict, phrase)\n self.assertListEqual(['a act', 'a cat', 'act a', 'cat a'], phrases)",
"def find_anagrams(s):\n word_list = sorted(s)\n anagrams_list = []\n find_anagrams_helper(word_list, anagrams_list, [], [])\n print(f'{count} anagrams: {anagrams_list}')",
"def unscramble_words(scrambled_words, word_list):\n output = []\n for i in scrambled_words:\n for k in word_list:\n if len(i) > len(k):\n if anagram(i, k):\n output.append(k)\n else:\n if(anagram(k, i)):\n output.append(k)\n print(output)\n return output",
"def getAnagrams(wordList, string):\n node = getAnagramNode(wordList, string)\n if node is None:\n return []\n else:\n return radixSort(getAnagramsAux(node))",
"def find_anagrams_helper(word_list, anagrams_list, ans_lst, index_list):\n global count\n\n if len(word_list) == len(ans_lst):\n word = string_manipulation(ans_lst)\n if word in words:\n if word not in anagrams_list:\n print(f'Found: {word}')\n print('Searching...')\n anagrams_list.append(word)\n count += 1\n else:\n for i in range(len(word_list)):\n if i not in index_list:\n # Choose\n index_list.append(i)\n ans_lst.append(word_list[i])\n word = string_manipulation(ans_lst)\n if has_prefix(word):\n # Explore\n find_anagrams_helper(word_list, anagrams_list, ans_lst, index_list)\n # Un-choose\n index_list.pop()\n ans_lst.pop()",
"def all_anagrams_list(words_list):\r\n anagrams_dict = all_anagrams_dict(words_list)\r\n anagrams_list = list(anagrams_dict.values())\r\n anagrams_list.sort(key = lambda L: -len(L))\r\n return anagrams_list",
"def _get_anagram_list(self, s, iteration_num = 1, previous_words = []):\n master_lst = [] #represents the list of all generated anagrams.\n old_word_lst = self._list #copy the _list used in the previous function call.\n self._shorten_list(s) #shorten _list to include only \"anagram-able\" words in\n #respect to input string s.\n\n for inclusion_report in self._list: #loop through the shortened list.\n\n word = inclusion_report[0]\n remaining_characters = inclusion_report[1]\n\n if not remaining_characters: #BASE CASE: the tested word is an exact match for s.\n test = previous_words + [word] \n if test not in master_lst:\n master_lst.append(test) #Add anagram to the list.\n if self._n == iteration_num:\n self._list = old_word_lst #Restore _lst to its previous version\n return master_lst\n else:\n self._n += 1 #Indicate another anagram has been found.\n\n elif remaining_characters: #RECURSIVE CASE: the tested word has extra characters.\n sub_list_grab = self._get_anagram_list(remaining_characters, iteration_num, previous_words + [word])\n if sub_list_grab: #If anything was found...\n for anagram in sub_list_grab:\n if anagram not in master_lst:\n master_lst.append(anagram)\n if self._n == iteration_num:\n self._list = old_word_lst #Restore _lst to its previous version\n return master_lst\n\n if inclusion_report == self._list[-1]: #If the entire list has been looped through...\n self._list = old_word_lst #Restore _lst to its previous version\n return master_lst\n\n #Activated iff self._lst == []\n self._list = old_word_lst #Restore _lst to its previous version\n return master_lst",
"def test_find_anagrams(self):\n dict_file = os.path.abspath('tests/data/ch03/dictionary.txt')\n word_list = cleanup_dict(dict_file)\n word_list = cleanup_list_more(word_list)\n anagram_dict = anagram_generator.get_anagram_dict(word_list)\n # Test a word without anagrams.\n anagrams = []\n test_list = anagram_generator.find_anagrams('ttr', anagram_dict)\n self.assertListEqual(anagrams, test_list)\n # Test a word with anagrams.\n anagrams = ['set', 'test', 'tet']\n test_list = anagram_generator.find_anagrams('test', anagram_dict)\n self.assertListEqual(anagrams, test_list)\n # Test a phrase.\n phrase = 'tip tap'\n anagrams = ['a', 'apt', 'at', 'i', 'it', 'pap', 'pat', 'patti', 'pip',\n 'pit', 'pita', 'pitt', 'tap', 'tat', 'tia', 'tip', 'tit']\n test_list = anagram_generator.find_anagrams(phrase, anagram_dict)\n self.assertListEqual(anagrams, test_list)\n # Test that it ignores uppercase.\n anagrams = ['joe', 'jose', 'so']\n test_list = anagram_generator.find_anagrams('Jose', anagram_dict)\n self.assertListEqual(anagrams, test_list)",
"def est_anagramme(mot1,mot2):\r\n return sorted(mot1) == sorted(mot2)",
"def all_anagrams(data):\n box = {}\n\n for i in range(len(data)):\n #print i\n s = sorts(data[i])\n\n if s in box:\n box[s].append(data[i])\n else:\n box[s] = [data[i]]\n return box",
"def anagrams(self) -> None:\n # Generate an instance of StdIn.\n reader: StdIn = StdIn()\n\n # Read stdin and create set of words for anagrams.\n words: list[str] = reader.string()\n words_set = set(\" \".join(words).split())\n\n # If the input is empty, just pass in the empty set.\n if len(words_set) == 0:\n words_set = {\"\"}\n\n # Call the function.\n result: list[list[str]] = anagrams(words_set)\n\n # Print results to stdout.\n print(result)",
"def group_anagram(strs):\n ana = {}\n for string in strs:\n s = ''.join(sorted(string))\n if s in ana:\n ana[s].append(string)\n else:\n ana[s] = [string]\n return [ana[x] for x in ana]",
"def anagrams(list_of_str , string):\n \n occurrences_string = get_occurrences(string)\n \n for element in list_of_str:\n \n if get_occurrences(element) != occurrences_string:\n return False\n \n return True",
"def find_anagrams(words):\n anagrams = {}\n\n for word in words:\n anagrams.setdefault(alphabetize(word), [word])\n if word not in anagrams[alphabetize(word)]:\n anagrams[alphabetize(word)].append(word)\n\n return anagrams",
"def words_with_anagrams(list1,list2):\n\n # Sort in anagram order\n for i in range(len(list1)):\n list1[i] = anagram_counting_sort(list1[i],True)\n \n for j in range(len(list2)):\n list2[j] = anagram_counting_sort(list2[j],False)\n\n # Run radix sort\n list1 = optimized_radix_sort_task2(list1)\n list2 = optimized_radix_sort_task2(list2)\n\n # Remove duplicates in list2\n list2 = remove_duplicates(list2)\n\n pointer_right = 0\n pointer_left = 0\n res = []\n\n # Compares elements of list1 with elements of list2\n while pointer_left < len(list1) and pointer_right < len(list2):\n left_item = list1[pointer_left]\n right_item = list2[pointer_right]\n \n # Perform length comparison\n if len(right_item[0]) > len(left_item[0]):\n pointer_left += 1\n elif len(left_item[0]) > len(right_item[0]):\n pointer_right += 1\n else: \n if left_item[0] == right_item[0]:\n res.append(left_item[1])\n pointer_left += 1\n else: \n # Perform character comparison\n for i in range(len(left_item[0])): \n if left_item[0][i] > right_item[0][i]:\n pointer_right += 1\n break\n elif left_item[0][i] < right_item[0][i]:\n pointer_left += 1\n break\n \n return res",
"def is_anagram(word1, word2):\n \n word1_list = [i for i in word1.lower() if i != \" \"]\n word2_list = [j for j in word2.lower() if j != \" \"]\n \n word1_list.sort()\n word2_list.sort()\n \n return word1_list == word2_list\n pass",
"def detect_anagrams(word, candidates):\n lower = word.lower()\n counts = Counter(lower)\n\n def yield_anagram():\n \"\"\"\n return the anagrams of word one by one\n \"\"\"\n for candidate in candidates:\n lower_candidate = candidate.lower()\n if Counter(lower_candidate) == counts:\n if lower_candidate != lower:\n yield candidate\n\n return list(yield_anagram())",
"def find_matching_words(anagram, word_list):\r\n pass",
"def anagram(main_str, str_list):\n return [_str for _str in str_list if str_list and Counter(_str) == Counter(main_str)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set up a list of rigid link assemblies (RA)
|
def rigidLinkAssemblies(self):
# allocate(RAm1(1:Init%NElem)) ! NOTE: do not deallocate, this is an "output" of this function
# RAm1(1:Init%NElem) = -1
#
# --- Establish a list of rigid link elements
Er = [e for e in self.Elements if e.data['TypeID']==idMemberRigid ]
EIDr = [e.ID for e in self.Elements if e.data['TypeID']==idMemberRigid ]
print(' Rigid Elements ',EIDr)
RA = []
while len(EIDr)>0:
# Creating List Ea of elements of a given assembly
EIDa =[]
id0 = EIDr.pop()
EIDa.append(id0)
e0 = self.getElement(id0)
addNeighbors(self, e0, EIDr, EIDa)
print(' Rigid Assembly, element IDs: ', EIDa)
RA.append(EIDa)
return RA
|
[
"def setup(self):\n self._rl_modules = {}\n self.__check_module_configs(self.config.modules)\n for module_id, module_spec in self.config.modules.items():\n self._rl_modules[module_id] = module_spec.build()",
"def _load_links(self) -> NoReturn:\n total = self.project_size[2]\n self._links = {\n self.object_name(shared_enum.ElementType.LINK, index): index\n for index in range(total)\n }",
"def rebuild(self, links):\n self.links = []\n for link in links:\n self.add(link.get(\"type\"), link.get(\"target\"))",
"def _all_assemblies(self):\n return itertools.chain(\n # Complete assembly is always first\n (self.complete_assembly,),\n self.orphan_assemblies,\n (model.assembly for group, model in self._all_models()\n if model.assembly),\n (step.assembly for step in self._all_protocol_steps()\n if step.assembly),\n (step.assembly for step in self._all_analysis_steps()\n if step.assembly),\n (restraint.assembly\n for restraint in self._all_restraints() if restraint.assembly))",
"def register_all_links():\n\n # all proficient human datasets\n ph_tasks = [\"lift\", \"can\", \"square\", \"transport\", \"tool_hang\", \"lift_real\", \"can_real\", \"tool_hang_real\"]\n ph_horizons = [400, 400, 400, 700, 700, 1000, 1000, 1000]\n for task, horizon in zip(ph_tasks, ph_horizons):\n register_dataset_link(task=task, dataset_type=\"ph\", hdf5_type=\"raw\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/demo.hdf5\".format(task))\n # real world datasets only have demo.hdf5 files which already contain all observation modalities\n # while sim datasets store raw low-dim mujoco states in the demo.hdf5\n if \"real\" not in task:\n register_dataset_link(task=task, dataset_type=\"ph\", hdf5_type=\"low_dim\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/low_dim.hdf5\".format(task))\n register_dataset_link(task=task, dataset_type=\"ph\", hdf5_type=\"image\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/image.hdf5\".format(task))\n\n # all multi human datasets\n mh_tasks = [\"lift\", \"can\", \"square\", \"transport\"]\n mh_horizons = [500, 500, 500, 1100]\n for task, horizon in zip(mh_tasks, mh_horizons):\n register_dataset_link(task=task, dataset_type=\"mh\", hdf5_type=\"raw\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/demo.hdf5\".format(task))\n register_dataset_link(task=task, dataset_type=\"mh\", hdf5_type=\"low_dim\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/low_dim.hdf5\".format(task))\n register_dataset_link(task=task, dataset_type=\"mh\", hdf5_type=\"image\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/image.hdf5\".format(task))\n\n # all machine generated datasets\n for task, horizon in zip([\"lift\", \"can\"], [400, 400]):\n register_dataset_link(task=task, dataset_type=\"mg\", hdf5_type=\"raw\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/demo.hdf5\".format(task))\n register_dataset_link(task=task, dataset_type=\"mg\", hdf5_type=\"low_dim_sparse\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_sparse.hdf5\".format(task))\n register_dataset_link(task=task, dataset_type=\"mg\", hdf5_type=\"image_sparse\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/image_sparse.hdf5\".format(task))\n register_dataset_link(task=task, dataset_type=\"mg\", hdf5_type=\"low_dim_dense\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_dense.hdf5\".format(task))\n register_dataset_link(task=task, dataset_type=\"mg\", hdf5_type=\"image_dense\", horizon=horizon,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/image_dense.hdf5\".format(task))\n\n # can-paired dataset\n register_dataset_link(task=\"can\", dataset_type=\"paired\", hdf5_type=\"raw\", horizon=400,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/demo.hdf5\")\n register_dataset_link(task=\"can\", dataset_type=\"paired\", hdf5_type=\"low_dim\", horizon=400,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/low_dim.hdf5\")\n register_dataset_link(task=\"can\", dataset_type=\"paired\", hdf5_type=\"image\", horizon=400,\n link=\"http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/image.hdf5\")",
"def doScaffolding(self, pool):\r\n for lib in pool.libs:\r\n if lib.sequencingPlatform == \"pacbio\":\r\n self.assembly = PBJelly.PBJelly(pool.outputDir + \"scaffolds/\", assembly=self.assembly, reads=lib.forward).execute()",
"def initialize(self, arms: List[str]):\n pass",
"def setArrs(self, arrs):\n self.sequential_model.setArrs(arrs)",
"def load_modules():\r\n with open(os.path.join(\"lib\", \"agents\", \"agents.yaml\")) as f: # TODO save all paths somewhere centralized\r\n global available_agents\r\n data = yaml.load(f, Loader=SafeLoader)\r\n available_agents = [AgentModule(data[a][\"name\"], data[a][\"path\"], data[a][\"filename\"], data[a][\"type\"]) for a in data]\r\n\r\n with open(os.path.join(\"lib\", \"listeners\", \"listeners.yaml\")) as f: # TODO save all paths somewhere centralized\r\n global available_listeners\r\n data = yaml.load(f, Loader=SafeLoader)\r\n available_listeners = [ListenerModule(data[a][\"name\"], data[a][\"file\"]) for a in data]\r\n\r\n with open(os.path.join(\"common\", \"post\", \"modules.yaml\")) as f: # TODO save all paths somewhere centralized\r\n global available_post_modules\r\n data = yaml.load(f, Loader=SafeLoader)\r\n available_post_modules = [ScriptModule(data[s][\"name\"], data[s][\"filename\"], data[s]['command'], data[s]['type'], data[s][\"description\"]) for s in data]",
"def test_setup_samples(self):\n flist = find_samples(j_doe_00_05)\n for f in flist:\n setup_sample(f, **{'analysis':'Align_standard_seqcap', 'genome_build':'rn4', 'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'num_cores':8, 'distributed':False})\n for f in flist:\n with open(f, \"r\") as fh:\n config = yaml.load(fh)\n if config[\"details\"][0].get(\"multiplex\", None):\n self.assertEqual(config[\"details\"][0][\"multiplex\"][0][\"genome_build\"], \"rn4\")\n else:\n self.assertEqual(config[\"details\"][0][\"genome_build\"], \"rn4\")\n\n with open(f.replace(\"-bcbb-config.yaml\", \"-post_process.yaml\")) as fh:\n config = yaml.load(fh)\n self.assertEqual(config[\"custom_algorithms\"][ANALYSIS_TYPE][\"hybrid_bait\"], 'rat_baits.interval_list')\n self.assertEqual(config[\"custom_algorithms\"][ANALYSIS_TYPE][\"hybrid_target\"], 'rat_targets.interval_list')\n self.assertEqual(config[\"algorithm\"][\"num_cores\"], 8)\n \n for f in flist:\n setup_sample(f, **{'analysis':ANALYSIS_TYPE, 'genome_build':'rn4', 'dry_run':False,\n 'no_only_run':True, 'google_report':True,\n 'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'amplicon':True, 'num_cores':8, 'distributed':False})\n with open(f, \"r\") as fh:\n config = yaml.load(fh)\n if config[\"details\"][0].get(\"multiplex\", None):\n self.assertEqual(config[\"details\"][0][\"multiplex\"][0][\"genome_build\"], \"rn4\")\n else:\n self.assertEqual(config[\"details\"][0][\"genome_build\"], \"rn4\")\n with open(f.replace(\"-bcbb-config.yaml\", \"-post_process.yaml\")) as fh:\n config = yaml.load(fh)\n self.assertEqual(config[\"algorithm\"][\"mark_duplicates\"], False)\n self.assertEqual(config[\"custom_algorithms\"][ANALYSIS_TYPE][\"mark_duplicates\"], False)",
"def RAElimination(self, RA):\r\n Elements = [self.getElement(eid) for eid in RA]\r\n # --- List of nodes stored first \r\n #print('>>> Elements',Elements)\r\n Nodes = self.elements2nodes(Elements)\r\n INodesID = [n.ID for n in Nodes]\r\n print(' Nodes involved in assembly (unique)', INodesID)\r\n #--- Look for potential interface node\r\n NodesInterf = []\r\n for iNodeID, (node,nodeID) in enumerate(zip(Nodes, INodesID)):\r\n if 'IBC' in node.data.keys():\r\n print(' Node',nodeID, ' is an interface node, selecting it for the rigid assembly')\r\n NodesInterf.append(nodeID)\r\n # --- Decide which node will be the main node of the rigid assembly\r\n if (len(NodesInterf)==0):\r\n iiMainNode = 0 # By default we select the first node\r\n elif (len(NodesInterf)==1):\r\n # Finding the index of the interface node\r\n idMainNode = NodesInterf[0]\r\n iiMainNode = INodesID.index(idMainNode)\r\n else:\r\n raise Exception('Cannot have several interface nodes linked within a same rigid assembly')\r\n print(' Selecting node ID ',INodesID[iiMainNode], 'to be the main node for the rigid assembly')\r\n # --- Order list of joints with main node first (swapping iMainNode with INodes(1))\r\n iTmp = INodesID[0]\r\n INodesID[0] = INodesID[iiMainNode]\r\n INodesID[iiMainNode] = iTmp\r\n print(' Nodes involved in assembly (after select):',INodesID)\r\n # --- Building Transformation matrix\r\n nNodes = len(INodesID)\r\n Tc = np.zeros((6*nNodes,6))\r\n # I6 for first node since it's the \"leader\"\r\n Tc[:6,:6]=np.eye(6)\r\n # Rigid transformation matrix for the other nodes \r\n P1 = self.getNode(INodesID[0]).point # reference node coordinates\r\n for i,nID in enumerate(INodesID[1:]):\r\n Pi = self.getNode(nID).point # follower node coordinates\r\n T_r = rigidTransformationTwoPoints(P1, Pi)\r\n Tc[ (i+1)*6:(i+2)*6, :] = T_r\r\n #print('Rigid transformation from ref point',P1,' to ',Pi, T_r)\r\n return Tc, INodesID",
"def _set_explicit_linkers(self, linkers, old_linker):\n if isinstance(linkers, str):\n self._linker(linkers)\n else:\n for linker in linkers:\n self._linker(linker)\n self.linker = old_linker",
"def set_libraries (self, libnames):\r\n self.libraries = copy (libnames)",
"def link_assembly(ass):\n dirname = assembly_dir(ass)\n source = os.path.join(\"ncbi\", dirname, dirname + \"_genomic.fna.gz\")\n print('Source:', source, 'Ass:', ass)\n if os.path.exists(source):\n dest = os.path.join(\"assemblies\", assembly_name(ass) + \".fasta.gz\")\n if not os.path.exists(\"assemblies\"):\n os.mkdir(\"assemblies\")\n if not os.path.exists(dest):\n print('Link from', source, 'to', dest)\n os.symlink(os.path.join(\"..\", source), dest)",
"def addLinks(self, links):\n self.links = links",
"def Nu_importAllRefs() :\n\tsysPath = 'O:/studioTools/maya/python/tool/rig/nuTools/pipeline'\n\tif not sysPath in sys.path : \n\t\tsys.path.append(sysPath)\n\n\timport pipeTools\n\treload(pipeTools)\n\t\n\n\tpipeTools.importAllRefs()",
"def startup(self):\n self._startup = True\n for cam in self.cameras_list:\n cam.startup()\n self._startup = self._startup & cam._startup",
"def __init_libs(self):\n\n libs_path = os.path.join(main_utils.get_files_dir(), \"data\")\n cmd = \"\"\n\n libs_mapping = {\n \"libwireshark.so\": [\n \"libwireshark.so.6\", \"libwireshark.so.6.0.1\"], \"libwiretap.so\": [\n \"libwiretap.so.5\", \"libwiretap.so.5.0.1\"], \"libwsutil.so\": [\n \"libwsutil.so.6\", \"libwsutil.so.6.0.0\"]}\n for lib in libs_mapping:\n for sym_lib in libs_mapping[lib]:\n # if not os.path.isfile(os.path.join(libs_path,sym_lib)):\n if True:\n # TODO: chown to restore ownership for the symlinks\n cmd = cmd + \" ln -s \" + \\\n os.path.join(libs_path, lib) + \" \" + os.path.join(libs_path, sym_lib) + \"; \"\n\n exes = [\"diag_revealer\",\n \"diag_revealer_mtk\",\n \"android_pie_ws_dissector\",\n \"android_ws_dissector\"]\n for exe in exes:\n cmd = cmd + \" chmod 755 \" + os.path.join(libs_path, exe) + \"; \"\n\n cmd = cmd + \"chmod -R 755 \" + libs_path\n main_utils.run_shell_cmd(cmd)",
"def config_armies(filename: str) -> None:\n game = Game()\n reader = Reader()\n armies = reader.read(filename)\n game.start_step = reader.start_from_step\n for army in armies:\n game.add_army(army)\n game.start()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The neighbor elements of element e0 (those found within the list Er) are added to the list Ea
|
def addNeighbors(self, e0, EIDr, EIDa) :
#print('----------------------------------------------------')
#print('>>> Looking for neighbors of ',e0.ID, 'within',EIDr)
if len(EIDr)==0:
return EIDa
EIDn =[] # List of neighbors of e0
# Loop through all elements, setup list of e0-neighbors, add them to Ea, remove them from Er
commonEIDs=[]
for idk in EIDr:
ek = self.getElement(idk)
#print('Looking if ', e0.ID, 'and', idk, 'are directly connected.')
if self.areElementsConnected(e0, ek):
#print(' YES, ElementID',idk, 'is connected to elementID', e0.ID)
commonEIDs.append(idk)
#else:
# print(' NO')
# Remove element from Er (a rigid element can belong to only one assembly)
#print('EIDr is ',EIDr)
for idk in commonEIDs:
EIDr.remove(idk)
#print('removing ',idk,'EIDr is ',EIDr)
EIDn.append(idk) # adding to neighbors
EIDa.append(idk) # adding to assembly
#print('>>> The neighbors of ',e0.ID, 'are', EIDn)
#print('')
# Loop through neighbors and recursively add neighbors of neighbors
if len(EIDr)>0:
for idk in EIDn:
ek = self.getElement(idk)
EIDa = addNeighbors(self, ek, EIDr, EIDa)
return EIDa
|
[
"def augVertexListHelper(self, edgesConsidered):\n vertexLists = {key : [] for key in self.complex.oneCells} # return a list of the vertices with more information about them\n\n stackingVertexDict = {generator : \\\n {vertex: StackingVertex(vertex, [], [], [], []) for vertex in self.vertexList} \\\n for generator in self.complex.oneCells}\n \n\n ordering = {}\n for i in range(0, len(self.vertexList)):\n v = self.vertexList[i]\n ordering[v] = i\n for e in edgesConsidered:\n if not e.inverse:\n if ordering[e.initVertex] < ordering[e.destVertex]:\n stackingVertexDict[e.generator][e.initVertex].higherOutgoing.append(e.destVertex)\n stackingVertexDict[e.generator][e.destVertex].lowerIncoming.append(e.initVertex)\n else:\n stackingVertexDict[e.generator][e.initVertex].lowerOutgoing.append(e.destVertex)\n stackingVertexDict[e.generator][e.destVertex].higherIncoming.append(e.initVertex)\n \n else:\n if ordering[e.initVertex] < ordering[e.destVertex]:\n stackingVertexDict[e.generator][e.initVertex].higherIncoming.append(e.destVertex)\n stackingVertexDict[e.generator][e.destVertex].lowerOutgoing.append(e.initVertex)\n else:\n stackingVertexDict[e.generator][e.initVertex].lowerIncoming.append(e.destVertex)\n stackingVertexDict[e.generator][e.destVertex].higherOutgoing.append(e.initVertex)\n\n for i in range(0, len(self.vertexList)):\n for key in vertexLists.keys():\n vertexLists[key].append(stackingVertexDict[key][self.vertexList[i]])\n \n self.augVertexList = vertexLists",
"def __add_neighbours(self):\n calculate_cell_neighbour_coordinates = self._neighbourhood.calculate_cell_neighbour_coordinates\n coordinates = self._current_state.keys()\n for coordinate, cell_c, cell_n in zip(coordinates, self._current_state.values(), self._next_state.values()):\n n_coord = calculate_cell_neighbour_coordinates(\n coordinate, self._dimension)\n cell_c.neighbours = list([self._current_state[nc]\n for nc in n_coord])\n cell_n.neighbours = list([self._next_state[nc] for nc in n_coord])",
"def add_egdes_from_node_list(G, nodelist, temppre=True):\n #only first source is taken if more than one source exists\n source_name = nodelist[0][0]\n node_tuple = nodelist[1]\n nG = G\n for index, node in enumerate(node_tuple):\n current_node = node\n next_node = node_tuple[(index + 1) % len(node_tuple)]\n if next_node == node_tuple[0]:\n break\n \n #no loop\n if current_node == next_node:\n pass\n else:\n if temppre:\n nG.add_edge(current_node, next_node, weight=1.0, source=source_name)\n else:\n #tempsyn elements get another weight\n nG.add_edge(current_node, next_node, weight=5.0, source=source_name)\n return nG",
"def naeps(G=None, E=None):\n if G:\n E = G.E\n naeps = []\n for i in range(len(E)):\n for j in range(i + 1, len(E)):\n if not adj(E[i], E[j]):\n naeps.append((E[i], E[j]))\n return naeps",
"def aretes_vers_liste_adjacence(n,aretes):\n\n adjacence = [ [] for i in range(n) ] #liste contenant n listes vides\n for arete in aretes:\n adjacence[ arete[0] ].append( arete[1] )\n adjacence[ arete[1] ].append( arete[0] )\n\n return adjacence",
"def add_edge_table(self, etab):\n add = ([],[]) # list of edges and h-edges to add\n remove = [] # list of edges to remove\n for (v1,v2),(n1,n2) in etab.items():\n conn_type = self.edge_type(self.edge(v1,v2))\n if conn_type == 1: n1 += 1 #and add to the relevant edge count\n elif conn_type == 2: n2 += 1\n \n t1 = self.type(v1)\n t2 = self.type(v2)\n if (t1 == 1 and t2 == 1) or (t1 == 2 and t2 == 2): #types are ZX & equal,\n n1 = bool(n1) #so normal edges fuse\n pairs, n2 = divmod(n2,2)#while hadamard edges go modulo 2\n self.scalar.add_power(-2*pairs)\n if n1 != 0 and n2 != 0: #reduction rule for when both edges appear\n new_type = 1\n self.add_to_phase(v1, 1)\n self.scalar.add_power(-1)\n elif n1 != 0: new_type = 1\n elif n2 != 0: new_type = 2\n else: new_type = 0\n elif (t1 == 1 and t2 == 2) or (t1 == 2 and t2 == 1): #types are ZX & different\n pairs, n1 = divmod(n1,2)#so normal edges go modulo 2\n n2 = bool(n2) #while hadamard edges fuse\n self.scalar.add_power(-2*pairs)\n if n1 != 0 and n2 != 0: #reduction rule for when both edges appear\n new_type = 2\n self.add_to_phase(v1, 1)\n self.scalar.add_power(-1)\n elif n1 != 0: new_type = 1\n elif n2 != 0: new_type = 2\n else: new_type = 0\n elif (t1 == 1 and t2 == 3) or (t1 == 3 and t2 == 1): # Z & H-box\n n1 = bool(n1)\n if n1 + n2 > 1:\n raise ValueError(\"Unhandled parallel edges between nodes of type (%s,%s)\" % (t1,t2))\n else:\n if n1 == 1: new_type = 1\n elif n2 == 1: new_type = 2\n else: new_type = 0\n else:\n if n1 + n2 > 1:\n raise ValueError(\"Unhandled parallel edges between nodes of type (%s,%s)\" % (t1,t2))\n else:\n if n1 == 1: new_type = 1\n elif n2 == 1: new_type = 2\n else: new_type = 0\n\n\n if new_type != 0: # They should be connected, so update the graph\n if conn_type == 0: #new edge added\n add[new_type-1].append((v1,v2))\n elif conn_type != new_type: #type of edge has changed\n self.set_edge_type(self.edge(v1,v2), new_type)\n elif conn_type != 0: #They were connected, but not anymore, so update the graph\n remove.append(self.edge(v1,v2))\n\n self.remove_edges(remove)\n self.add_edges(add[0],1)\n self.add_edges(add[1],2)",
"def get_adjacency_list(self):\n edgeList = [None] * (len(self.nodes) + 1)\n for edge in self.edges:\n #print edge.node_from.value -100\n if edgeList[edge.node_from.value] == None:\n edgeList[edge.node_from.value] = []\n edgeList[edge.node_from.value].append( (edge.node_to.value, edge.value) ) \n return edgeList",
"def setNeighbors(self):\n \n self.nb = {}\n for a1 in self.data:\n ind1 = self.data.index(a1)\n nbd = {}\n nbd[ind1] = 0\n #nblist = self.nb[ind1] = [ind1]\n self.energy_matrix[ind1, ind1] = 0\n #set 1-2 interactions to 0\n for b in a1.bonds:\n a2 = b.atom1\n if id(a2)==id(a1): a2 = b.atom2\n if a2.number > a1.number:\n #then do something here\n ind2 = self.data.index(a2)\n nbd[ind2] = 0\n self.energy_matrix[ind1, ind2] = 0\n self.energy_matrix[ind2, ind1] = 0\n #set 1-3 interactions to 0\n for b2 in a2.bonds:\n a3 = b2.atom1\n if id(a3)==id(a2): a3 = b2.atom2\n if id(a3)==id(a1): continue\n if a3.number > a1.number:\n #then do something here\n ind3 = self.data.index(a3)\n nbd[ind3] = 0\n self.energy_matrix[ind1, ind3] = 0\n self.energy_matrix[ind3, ind1] = 0\n #set 1-4 interactions to 0\n #if rotatable, skip 1-4s for b2\n if b2.rotatable:\n #print 'skipping ', b2.atom1.name, '-', b2.atom2.name\n continue\n for b3 in a3.bonds:\n #if hasattr(b3, 'activeTors') and b3.activeTors: continue\n a4 = b3.atom1\n if id(a4)==id(a3): a4 = b3.atom2\n if id(a4)==id(a2): continue\n if id(a4)==id(a1): continue\n if a4.number > a1.number:\n #then do something here\n ind4 = self.data.index(a4)\n nbd[ind4] = 0\n self.energy_matrix[ind1, ind4] = 0\n self.energy_matrix[ind4, ind1] = 0\n self.nb[ind1] = list(nbd.keys())",
"def _addNeighsToQueue(self, neighs):\n for n in neighs:\n if (n in self.weightMatrix) or (n in self.seenPos):\n pass\n else:\n self.seenPos.add(n)\n self.queue.insert(n)",
"def add_edge(self, edge):\n\t\tedge = set(edge)\n\t\t(vertex, neighbor) = tuple(edge)\n\t\tif vertex not in self.g:\n\t\t\tself.g[vertex] = [neighbor]\n\t\telse:\n\t\t\tself.g[vertex].append(neighbor)\n\t\tprint \"Added Edge : {}\".format(edge)",
"def adding_nodes(self):\n \n for node in self.vertex:\n i = 0\n if node not in self.queue:\n self.queue.append(node)\n\n for neigbor in self.neighbors:\n if node == neigbor[0]:\n if neigbor[-1] not in self.queue:\n self.queue.append(neigbor[-1])\n \n self.visited.append(self.queue.pop(i))\n\n return self.visited",
"def add_edges(self, *nodes):\n for node in nodes:\n self.adjacent.add(node)\n node.adjacent.add(self)",
"def add_edges_from(self, ebunch):\n for (source, target, new_attr) in ebunch:\n self.add_edge(source, target, new_attr)",
"def add_neighbor_atom_indices(self, indices):\n for index in indices:\n if index not in self.indices_of_atoms_connecting:\n self.indices_of_atoms_connecting.append(index)",
"def two_nearest_neighbors(G, e1, e2):\n\n e1n = G.neighbors(e1)\n e2n = G.neighbors(e2)\n\n list1 = [e for e in list(e1n) if e not in [e1, e2]]\n list2 = [e for e in list(e2n) if e not in [e1, e2]]\n\n print(e1, list1, e2, list2)\n\n assert len(list1) > 1 and len(list2) > 1\n\n glist = []\n\n for i in range(2):\n\n GG = copy.deepcopy(G)\n\n GG.remove_edge(list1[0], e1)\n GG.add_edge(list1[0], e2)\n\n GG.remove_edge(list2[i], e2)\n GG.add_edge(list2[i], e1)\n\n glist.append(GG)\n\n return glist",
"def add_edge(people_list, node1, node2, degree_list = []):\n if len(degree_list) > np.maximum(node1, node2):\n degree_list[node1] += 1\n degree_list[node2] += 1\n people_list[node1].contacts.append(node2)\n people_list[node2].contacts.append(node1)",
"def add_in_edge(self, node_name_0, node_name_1):\n if node_name_0 not in self.graph_proto.edges_in[node_name_1].val:\n self.graph_proto.edges_in[node_name_1].val.append(node_name_0)",
"def combine_data_neighbours(model_data, old_data_point):\n data_point = old_data_point.tolist()\n distances = calculate_neighbours(model_data, data_point)\n data_point.append(distances)\n return data_point",
"def append_edge(self, edge):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns constraint matrix Tc for a rigid assembly (RA) formed by a set of elements: x_c = Tc.x_c_tilde, where x_c are all the DOF of the rigid assembly and x_c_tilde are the 6 reduced DOF (leader DOF)
|
def RAElimination(self, RA):
Elements = [self.getElement(eid) for eid in RA]
# --- List of nodes stored first
#print('>>> Elements',Elements)
Nodes = self.elements2nodes(Elements)
INodesID = [n.ID for n in Nodes]
print(' Nodes involved in assembly (unique)', INodesID)
#--- Look for potential interface node
NodesInterf = []
for iNodeID, (node,nodeID) in enumerate(zip(Nodes, INodesID)):
if 'IBC' in node.data.keys():
print(' Node',nodeID, ' is an interface node, selecting it for the rigid assembly')
NodesInterf.append(nodeID)
# --- Decide which node will be the main node of the rigid assembly
if (len(NodesInterf)==0):
iiMainNode = 0 # By default we select the first node
elif (len(NodesInterf)==1):
# Finding the index of the interface node
idMainNode = NodesInterf[0]
iiMainNode = INodesID.index(idMainNode)
else:
raise Exception('Cannot have several interface nodes linked within a same rigid assembly')
print(' Selecting node ID ',INodesID[iiMainNode], 'to be the main node for the rigid assembly')
# --- Order list of joints with main node first (swapping iMainNode with INodes(1))
iTmp = INodesID[0]
INodesID[0] = INodesID[iiMainNode]
INodesID[iiMainNode] = iTmp
print(' Nodes involved in assembly (after select):',INodesID)
# --- Building Transformation matrix
nNodes = len(INodesID)
Tc = np.zeros((6*nNodes,6))
# I6 for first node since it's the "leader"
Tc[:6,:6]=np.eye(6)
# Rigid transformation matrix for the other nodes
P1 = self.getNode(INodesID[0]).point # reference node coordinates
for i,nID in enumerate(INodesID[1:]):
Pi = self.getNode(nID).point # follower node coordinates
T_r = rigidTransformationTwoPoints(P1, Pi)
Tc[ (i+1)*6:(i+2)*6, :] = T_r
#print('Rigid transformation from ref point',P1,' to ',Pi, T_r)
return Tc, INodesID
|
[
"def maccormack(U_init,numt,numx,numy,delx,dely,Tw,Tfs,rho_fs,ufs,c_v,c_p,viscfs,Prt,lmbda,R,gamma):\n Un = numpy.zeros((numt+1,4,numx,numy))\n Un[0,:,:,:] = U_init.copy()\n #\n U = U_init.copy()\n #\n Us = U_init.copy()\n #\n for t in range(1,numt+1):\n \t#get properties to calculate fluxes:\n \tT = get_Temperature(U, numx, numy, Tw, Tfs, c_v)\n \tmu = get_visc(T, viscfs, Tfs)\n \tk = get_k(mu, c_p, Prt)\n \t#get shear:\n \tt_xyE = get_tau_xy_Epredict(U, mu, numx, numy, delx, dely )\n \tt_xyF = get_tau_xy_Fpredict(U, mu, numx, numy, delx, dely )\n \tt_xx = get_tau_xx_Epredict(U, mu, numx, numy, delx, dely, lmbda)\n \tt_yy = get_tau_yy_Fpredict(U, mu, numx, numy, delx, dely, lmbda)\n \t#calculate fluxes E, F:\n \tE = get_E_flux_predictor(U, numx, numy, delx, mu, T, k, t_xx, t_xyE, R)\n \tF = get_F_flux_predictor(U, numx, numy, dely, mu, T, k, t_xyF, t_yy, R)\n \t#dt:\n \tdt = get_dt(U, numx, numy, delx, dely, mu, T, gamma, R, Prt)\n \t#Predictor Step:\n \tUs[:,1:-1,1:-1] = U[:,1:-1,1:-1] -\\\n \t\t\t\t\t\t\t(dt/delx)*(E[:,2:,1:-1] - E[:,1:-1,1:-1]) -\\\n \t\t\t\t\t\t\t(dt/dely)*(F[:,1:-1,2:] - F[:,1:-1,1:-1])\n \tUstar = get_BC(Us, T, numy, rho_fs, Tw, ufs, c_v, Tfs, R)\n \t#update properties:\n \tT2 = get_Temperature(Ustar, numx, numy, Tw, Tfs, c_v)\n \tmu2 = get_visc(T2, viscfs, Tfs)\n \tk2 = get_k(mu2, c_p, Prt)\n \t#update shear:\n \tt_xyE2 = get_tau_xy_Ecorrect(Ustar,mu2,numx, numy, delx, dely)\n \tt_xyF2 = get_tau_xy_Fcorrect(Ustar,mu2,numx, numy, delx, dely)\n \tt_xx2 = get_tau_xx_Ecorrect(Ustar, mu2, numx, numy, delx, dely, lmbda)\n \tt_yy2 = get_tau_yy_Fcorrect(Ustar, mu2, numx, numy, delx, dely, lmbda)\n \t#update fluxes:\n \tE2 = get_E_flux_correct(Ustar, numx, numy, delx, mu2, T2, k2, t_xx2, t_xyE2, R)\n \tF2 = get_F_flux_correct(Ustar, numx, numy, dely, mu2, T2, k2, t_xyF2, t_yy2, R)\n \t#corrector step:\n \tUn[t,:,1:-1,1:-1] = 0.5*( U[:,1:-1,1:-1] + Ustar[:,1:-1,1:-1] -\\\n \t\t\t\t\t\t\t(dt/delx)*(E2[:,1:-1,1:-1]-E2[:,:-2,1:-1]) -\\\n \t\t\t\t\t\t\t(dt/dely)*(F2[:,1:-1,1:-1]-F2[:,1:-1,:-2] ))\n \t#\n \tUn[t,:,:,:] = get_BC(Un[t,:,:,:], T2, numy, rho_fs, Tw, ufs, c_v, Tfs, R)\n \tU = Un[t,:,:,:].copy()\n \t#print(t)\n \tif( numpy.all(numpy.abs(Un[t,0,:,:]-Un[t-1,0,:,:]) < 1e-8) == True ):\n \t\ttt=t+1\n \t\tUn = Un[:tt,:,:,:].copy()\n \t\tmscn = (numpy.trapz(Un[t,1,0,:])/numpy.trapz(Un[t,1,-1,:]))*100\n \t\tprint('Mass is conserved by %.2f percent' % mscn)\n \t\tbreak\n \n return Un",
"def PC_1(T, verbose=False):\n \n # set up constraints into adj matrix form\n num_variables = max([max(t['i'], t['j']) for t in T]) + 1\n mat = [ [ None for i in range(num_variables) ] for j in range(num_variables) ]\n for constr in T:\n i, j, intervals = constr['i'], constr['j'], constr['intervals']\n \n mat[i][j] = intervals\n mat[j][i] = [(-b, -a) for a, b in reversed(intervals)]\n # ^ for reverse, we take each interval (a,b) and convert it into (-b,-a)\n # note we want to also reverse the order of all intervals so that it is again increasing\n \n \n # actual PC-1 core algorithm:\n changed = True\n while changed:\n changed = False\n for k in range(num_variables):\n for i in range(num_variables):\n for j in range(num_variables):\n # T_ij = T_ij + T_ik x T_kj\n\n if mat[i][k] == None or mat[k][j] == None:\n # if we cannot compose\n continue\n elif mat[i][j] == None:\n # if unset it means no constraint, so only compose\n mat[i][j] = compose(mat[i][k], mat[k][j])\n else:\n # intersect&compose\n op = intersect(mat[i][j], compose(mat[i][k], mat[k][j]))\n if mat[i][j] != op:\n mat[i][j] = op\n changed = True\n\n if not mat[i][j]:\n if verbose: print('Unsatisfiability deduced at the PC-1 level.')\n return None\n \n # convert back to standard form used elsewhere:\n S = []\n for i in range(num_variables):\n for j in range(i, num_variables):\n if mat[i][j]:\n S += [{'i': i, 'j': j, 'intervals': mat[i][j]}]\n return S",
"def constraint_matrix(self, fq):\n frozen = self.frozen_intco_list\n\n range_frozen = self.ranged_frozen_intco_list(fq)\n frozen = np.logical_or(frozen, range_frozen)\n\n if np.any(frozen):\n return np.diagflat(frozen)\n else:\n return None",
"def _generate_constraints(self):\n # First check if thermovariables are added to the model\n if not self._var_update:\n self.update_thermo_variables()\n\n rxn_constraints = []\n # Now add reaction variables and generate remaining constraints\n for rxn in self.reactions:\n if rxn.id in self.Exclude_reactions:\n logging.debug(\n \"Reaction {} is excluded from thermodyanmic analysis\".format(rxn.id)\n )\n continue\n\n # Directionality constraint\n dir_f, dir_r = directionality(rxn)\n ind_f, ind_r = delG_indicator(rxn)\n\n rxn_constraints.extend([dir_f, dir_r, ind_f, ind_r])\n\n # Create two different constraints for box method and MIQC method\n\n # delG constraint for box\n concentration_term = sum(\n stoic * metabolite.concentration_variable\n for metabolite, stoic in iteritems(rxn.metabolites)\n if metabolite.equilibrator_accession.inchi_key != PROTON_INCHI_KEY\n )\n\n err_term = sum(\n stoic * metabolite.delG_err_variable\n for metabolite, stoic in iteritems(rxn.metabolites)\n if metabolite.equilibrator_accession.inchi_key != PROTON_INCHI_KEY\n )\n\n lhs_forward = rxn.delG_forward - RT * concentration_term - err_term\n lhs_reverse = rxn.delG_reverse + RT * concentration_term + err_term\n rhs = rxn.delG_prime + rxn.delG_transport\n\n delG_f = self.problem.Constraint(\n lhs_forward,\n lb=rhs,\n ub=rhs,\n name=\"delG_{}\".format(rxn.forward_variable.name),\n )\n\n delG_r = self.problem.Constraint(\n lhs_reverse,\n lb=-rhs,\n ub=-rhs,\n name=\"delG_{}\".format(rxn.reverse_variable.name),\n )\n rxn_constraints.extend([delG_f, delG_r])\n\n return rxn_constraints",
"def SolveTruss(self):\n #check if truss is statically indeterminate\n if (2*self.nSups)+self.nBeams != (2*self.nJoints):\n raise RuntimeError(\"Truss geometry not suitable for static equilibrium\\\n analysis\")\n \n #create angles_arr: row-joints, column-beams, values-angle of beam wrt +x axis\n self.angles_arr = np.zeros((self.nJoints,self.nBeams))\n for i in np.arange(self.nBeams):\n #find the two joints connected to each beam\n joints = np.where(self.beams_arr[:,i] == 1)[0]\n x_coord = self.joints_arr[joints,0]\n y_coord = self.joints_arr[joints,1]\n del_y,del_x = y_coord[1]-y_coord[0], x_coord[1]-x_coord[0]\n alpha = np.arctan2(del_y,del_x) #angle at first joint\n beta = np.pi + alpha #angle at second joint\n self.angles_arr[joints,i] = [alpha,beta]\n\n indR = self.nBeams #index of reaction force\n row,col,data = [],[],[] #store values that help to make csr matrix\n \n #horizontal force balance at each joint\n #for each joint, get the values of elements of the arr that are non-zero\n rhs_h = np.zeros((self.nJoints,1)) #right hand side of equation\n for i in np.arange(self.nJoints):\n beams = np.where(self.beams_arr[i,:] == 1)[0]\n beam_n = np.shape(beams)[0] #number of beams connected to joint\n row.extend([i]*beam_n)\n col.extend(beams)\n angle = self.angles_arr[i,beams]\n data.extend(np.cos(angle))\n if self.joints_arr[i,4] == 1: #for reaction forces at support\n row.append(i)\n col.append(indR)\n data.append(1)\n indR += 1\n rhs_h[i] = self.joints_arr[i,2] #for external forces\n\n #vertical force balance at each joint\n #for each joint, get the values of elements of the arr that are non-zero\n rhs_v = np.zeros((self.nJoints,1))\n for i in np.arange(self.nJoints):\n beams = np.where(self.beams_arr[i,:] == 1)[0]\n beam_n = np.shape(beams)[0]\n row.extend([self.nJoints+i]*beam_n)\n col.extend(beams)\n angle = self.angles_arr[i,beams]\n data.extend(np.sin(angle))\n if self.joints_arr[i,4]:\n row.append(self.nJoints+i)\n col.append(indR)\n data.append(1)\n indR += 1\n rhs_v[i] = self.joints_arr[i,3]\n rhs_arr = np.concatenate((rhs_h,rhs_v),axis = 0)\n \n #create sparse matrix\n sparseM = csr_matrix((data,(row,col)),shape = (self.n,self.n))\n \n try:\n self.solve_F = spsolve(sparseM,rhs_arr)\n except:\n raise RuntimeError(\"Cannot solve the linear system, unstable truss?\")",
"def __create_transfer_coefficient_matrix(self) -> T.Variable:\n num_of_processes = len(self.__id_math_process_dict.keys())\n\n tc_matrix = T.zeros((num_of_processes, num_of_processes))\n\n for _, math_process in self.__id_math_process_dict.items():\n dest_ids, dest_rvs = \\\n math_process.create_outflow_tc_rvs()\n\n origin_ind = math_process.process_ind\n\n dest_inds = [self.__id_math_process_dict[pid].process_ind\n for pid in dest_ids]\n\n tc_matrix = T.set_subtensor(\n tc_matrix[[origin_ind], dest_inds], dest_rvs)\n\n return tc_matrix",
"def _rotation_matrix_from_crota(self):\n return super()._rotation_matrix_from_crota(crota_key='CROTA')",
"def tridiag_solver(a, b, c, d):\n nf = len(d)\n ac, bc, cc, dc = map(list, (a, b, c, d))\n for it in range(1, nf):\n mc = ac[it-1]/bc[it-1]\n bc[it] = bc[it] - mc*cc[it-1]\n dc[it] = dc[it] - mc*dc[it-1]\n\n xc = bc\n xc[-1] = dc[-1]/bc[-1]\n\n for il in range(nf-2, -1, -1):\n xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]\n\n return xc",
"def _thermlc(tautom, theta, deltal, x, jmax, dphdot, bet, c2):\n dphesc = np.zeros(900) # Initialise the output\n a = np.zeros(900); b = np.zeros(900); c = np.zeros(900)\n d = np.zeros(900); alp = np.zeros(900); u = np.zeros(900)\n g = np.zeros(900); gam = np.zeros(900)\n\n #c u(x) is the dimensionless photon occupation number\n c20 = tautom / deltal\n\n #c determine u\n #c define coefficients going into equation\n #c a(j) * u(j + 1) + b(j) * u(j) + c(j) * u(j - 1) = d(j)\n for j in range(1, jmax - 1):\n w1 = np.sqrt( x[j] * x[j + 1] )\n w2 = np.sqrt( x[j - 1] * x[j] )\n #c w1 is x(j + 1 / 2)\n #c w2 is x(j - 1 / 2)\n a[j] = -c20 * c2[j] * (theta / deltal / w1 + 0.5)\n t1 = -c20 * c2[j] * (0.5 - theta / deltal / w1)\n t2 = c20 * c2[j - 1] * (theta / deltal / w2 + 0.5)\n t3 = x[j]**3 * (tautom * bet[j])\n b[j] = t1 + t2 + t3\n c[j] = c20 * c2[j - 1] * (0.5 - theta / deltal / w2)\n d[j] = x[j] * dphdot[j]\n\n #c define constants going into boundary terms\n #c u(1) = aa * u(2) (zero flux at lowest energy)\n #c u(jx2) given from region 2 above\n x32 = np.sqrt(x[0] * x[1])\n aa = (theta / deltal / x32 + 0.5) / (theta / deltal / x32 - 0.5)\n\n #c zero flux at the highest energy\n u[jmax - 1] = 0.0\n\n #c invert tridiagonal matrix\n alp[1] = b[1] + c[1] * aa\n gam[1] = a[1] / alp[1]\n for j in range(2, jmax - 1):\n alp[j] = b[j] - c[j] * gam[j - 1]\n gam[j] = a[j] / alp[j]\n g[1] = d[1] / alp[1]\n for j in range(2, jmax - 2):\n g[j] = (d[j] - c[j] * g[j - 1]) / alp[j]\n g[jmax - 2] = (d[jmax - 2] - a[jmax - 2] * u[jmax - 1] \n - c[jmax - 2] * g[jmax - 3]) / alp[jmax - 2]\n u[jmax - 2] = g[jmax - 2]\n for j in range(2, jmax + 1):\n jj = jmax - j\n u[jj] = g[jj] - gam[jj] * u[jj + 1]\n u[0] = aa * u[1]\n #c compute new value of dph(x) and new value of dphesc(x)\n dphesc[:jmax] = x[:jmax] * x[:jmax] * u[:jmax] * bet[:jmax] * tautom\n\n return dphesc",
"def compute_time_optimal_stabilizer(self) -> np.ndarray:\n N = self.N\n M = self.M\n N0 = set(self.compute_largest_control_invariant_subset())\n TM = self.compute_shortest_transient_period()\n # compute a list of N's\n Ns = [None] * (TM + 1)\n Ns[0] = N0\n exclude = N0\n for i in range(1, TM + 1):\n Ns[i] = self._get_predecessors_for_set(Ns[i-1]) - exclude\n exclude = exclude | Ns[i]\n # define Boolean matrices\n NB = [None] * (TM + 1)\n for i in range(TM + 1):\n nb = np.zeros((N, N), dtype=np.bool_)\n for j in range(1, N + 1):\n if j in Ns[i]:\n nb[j - 1, j - 1] = 1\n NB[i] = nb\n # convert BCN to an equivalent and prepare f(x)\n # NOTE: Eq. (30) and (31) are optimized here to reduce computational complexity\n # We don't use the naive matrix product, but exploit the special structure.\n L_tilde = self._exchange_x_u()\n NL = [None] * TM # NL[i] = N_i * L_tilde\n for i in range(TM):\n NL[i] = np.empty((N, M*N), dtype=np.bool_)\n for col, j in enumerate(L_tilde):\n NL[i][:, col] = NB[i][j - 1]\n # compute F\n F = np.zeros((M, N), dtype=np.bool_)\n for x in range(1, N + 1):\n if x in Ns[0]:\n F[:, x - 1] = ba.col_sum(NL[0][:, (x - 1) * M: x*M]).T\n else:\n # find the set Ni that x lies in\n for i in range(1, TM + 1):\n if x in Ns[i]:\n break\n F[:, x - 1] = ba.col_sum(NL[i - 1][:, (x - 1) * M: x*M]).T\n return F",
"def reconstruct_ica(pulse_matr, ica, rm_comp=[]):\n\n src = ica.transform(pulse_matr)\n src[:, rm_comp] = 0\n recons_pulse_matr = src.dot(ica.mixing_.T)\n\n return recons_pulse_matr",
"def calc_constraints_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([c(x) for c in self.constraints])",
"def c_objective(x,grad,params_c,params_f,rAtheta,c_bounds, nu_c,fixed_params, fixed_param_values):\n if not fixed_params[0]: Ic_norm=x[0]\n else: Ic_norm=fixed_param_values[0]\n \n if not fixed_params[1]: rc_norm=x[1]\n else: rc_norm=fixed_param_values[1]\n \n if not fixed_params[2]: zc_norm=x[2]\n else: zc_norm=fixed_param_values[2]\n \n # Recover real units of guesses:\n Ic=Ic_norm*c_bounds[0][1]\n rc=rc_norm*c_bounds[1][1]\n \n # Recover sign of zc:\n zc= - zc_norm*c_bounds[2][1]\n\n # unwrap C and F-coil parameters\n tzc,trc,nzc,nrc = params_c\n zf,rf = params_f\n\n # get fields from C-coil parameters\n X,Z,Bxm,Bzm,Bs,rAm = multicoil_fields([Ic],[zc],[rc],[tzc],[trc],[nzc],[nrc])\n\n # find rAm at zf and rf position\n xidx=np.argmin(np.abs(X[0,:]-rf))\n zidx=np.argmin(np.abs(Z[:,0]-zf))\n \n rAm_xz=rAm[xidx,zidx]\n out = np.abs(rAtheta - rAm_xz)\n print out,nu_c[0]*Ic, Ic,rc,zc\n return out+nu_c[0]*Ic",
"def _get_rr_cc(self):\n theta = self.phi\n center = self.center[::-1] #Necessary \n\n if theta % 360.0 == 0.0:\n return self.unrotated_rr_cc\n\n # Rotate transposed rr_cc\n transposed = np.array(self.unrotated_rr_cc).T\n return rotate(transposed, center=center, theta=self.phi, rint='up').T",
"def _compute_control_inputs(self, traj):\n\n r = traj.u.copy() # reference is input of combined sys\n npts = traj.t.shape[0]\n u = np.zeros([npts, self.cds.plant.m])\n\n # Compute internal input\n for i in range(npts):\n\n ri = r[i,:]\n yi = traj.y[i,:]\n ti = traj.t[i]\n\n ui = self.cds.controller.c( yi , ri , ti )\n\n u[i,:] = ui\n\n return u",
"def generar_matriz_R(self, tp):\n # modulo del campo en el plano xy\n B1 = np.array([self.Bx, self.By])\n B1 = np.linalg.norm(B1, axis=0)\n\n # tres componentes de la direccion de rotacion. Cada U es un array de\n # n elementos, uno por cada sitio. Uz son ceros porque el campo en z\n # NO excita los spines.\n Ux = self.Bx/B1\n Uy = self.By/B1\n Uz = np.zeros_like(Ux)\n \n angulo = B1*tp\n \n # array de ceros y unos de tamano nx1\n zeros = np.zeros_like(Ux)\n ones = np.ones_like(Ux)\n \n # para definir la matriz uso la formula de Rodrigues:\n # https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle\n U_matrix = np.array([[ zeros, -Uz , Uy ],\n [ Uz , zeros, -Ux ],\n [-Uy , Ux , zeros]]\n )\n \n Uxy, Uxz, Uyz = [Ux*Uy, Ux*Uz, Uy*Uz]\n U2_matrix = np.array([[Ux*Ux, Uxy , Uxz ],\n [Uxy , Uy*Uy, Uyz ],\n [Uxz , Uyz , Uz*Uz]]\n )\n \n I = np.array([[ones, zeros, zeros], [zeros, ones, zeros], [zeros, zeros, ones]])\n \n R = np.cos(angulo) * I + np.sin(angulo) * U_matrix + (1-np.cos(angulo)) * U2_matrix\n # convierto en array nx3x3\n R = np.moveaxis(R,2,0)\n return R",
"def capp1_constraints(self):\n constraints = []\n for i in range(1, self.x + 1):\n for k in range(1, self.y + 1):\n equation = f\"\\tcapS{i}{k}: \" # Need S to differentiate between the two capacity constraints\n capp1 = []\n for j in range(1, self.z + 1):\n capp1.append(f\"x{i}{k}{j}\")\n equation += \" + \".join(capp1) + f\" - c{i}{k} <= 0\"\n constraints.append(equation)\n capp1_constraints = \"\\n\".join(constraints)\n capp1_constraints += \"\\n\"\n return capp1_constraints",
"def transit_constraints(self):\n constraints = []\n for k in range(1, self.y + 1):\n equation = f\"\\ttransit{k}: \"\n transit = []\n for i in range(1, self.x + 1):\n for j in range(1, self.z + 1):\n transit.append(f\"x{i}{k}{j}\")\n equation += \" + \".join(transit) + f\" - r <= 0\"\n constraints.append(equation)\n transit_constraints = \"\\n\".join(constraints)\n transit_constraints += \"\\n\"\n return transit_constraints",
"def test_cost_gradient(self):\n\n # Use seed for deterministic testing\n np.random.seed(42)\n\n def test(shape, plates, \n axis=-1, \n alpha_plates=None, \n plate_axis=None,\n mu=3):\n \n if plate_axis is not None:\n precomputes = [False, True]\n else:\n precomputes = [False]\n \n for precompute in precomputes:\n # Construct the model\n D = shape[axis]\n if alpha_plates is not None:\n alpha = Gamma(3, 5,\n plates=alpha_plates)\n alpha.initialize_from_random()\n else:\n alpha = 2\n X = GaussianARD(mu, alpha,\n shape=shape,\n plates=plates)\n\n # Some initial learning and rotator constructing\n X.initialize_from_random()\n Y = GaussianARD(X, 1)\n Y.observe(np.random.randn(*(Y.get_shape(0))))\n X.update()\n if alpha_plates is not None:\n alpha.update()\n rotX = RotateGaussianARD(X, alpha, \n axis=axis,\n precompute=precompute)\n else:\n rotX = RotateGaussianARD(X, \n axis=axis,\n precompute=precompute)\n try:\n mu.update()\n except:\n pass\n\n # Rotation matrices\n R = np.random.randn(D, D)\n if plate_axis is not None:\n C = plates[plate_axis]\n Q = np.random.randn(C, C)\n else:\n Q = None\n\n # Compute bound terms\n rotX.setup(plate_axis=plate_axis)\n\n if plate_axis is None:\n def f_r(r):\n (b, dr) = rotX.bound(np.reshape(r, np.shape(R)))\n return (b, np.ravel(dr))\n else:\n def f_r(r):\n (b, dr, dq) = rotX.bound(np.reshape(r, np.shape(R)),\n Q=Q)\n return (b, np.ravel(dr))\n\n def f_q(q):\n (b, dr, dq) = rotX.bound(R,\n Q=np.reshape(q, np.shape(Q)))\n return (b, np.ravel(dq))\n\n # Check gradient with respect to R\n err = optimize.check_gradient(f_r, \n np.ravel(R), \n verbose=False)[1]\n self.assertAllClose(err, 0, \n atol=1e-4,\n msg=\"Gradient incorrect for R\")\n\n # Check gradient with respect to Q\n if plate_axis is not None:\n err = optimize.check_gradient(f_q, \n np.ravel(Q), \n verbose=False)[1]\n self.assertAllClose(err, 0,\n atol=1e-4,\n msg=\"Gradient incorrect for Q\")\n\n return\n\n #\n # Basic rotation\n #\n test((3,), (), axis=-1)\n test((2,3,4), (), axis=-1)\n test((2,3,4), (), axis=-2)\n test((2,3,4), (), axis=-3)\n test((2,3,4), (5,6), axis=-2)\n\n #\n # Rotation with mu\n #\n\n # Simple\n test((1,), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast mu over rotated dim\n test((3,), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(),\n plates=()))\n # Broadcast mu over dim when multiple dims\n test((2,3), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(1,3),\n plates=()))\n test((2,3), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast mu over rotated dim when multiple dims\n test((2,3), (), axis=-2,\n mu=GaussianARD(2, 4,\n shape=(1,3),\n plates=()))\n test((2,3), (), axis=-2,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast mu over plates\n test((3,), (4,5), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,1)))\n test((3,), (4,5), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(5,)))\n\n #\n # Rotation with alpha\n #\n\n # Simple\n test((1,), (), axis=-1,\n alpha_plates=())\n test((3,), (), axis=-1,\n alpha_plates=(3,))\n # Broadcast alpha over rotated dim\n test((3,), (), axis=-1,\n alpha_plates=())\n test((3,), (), axis=-1,\n alpha_plates=(1,))\n # Broadcast alpha over dim when multiple dims\n test((2,3), (), axis=-1,\n alpha_plates=(1,3))\n test((2,3), (), axis=-1,\n alpha_plates=(3,))\n # Broadcast alpha over rotated dim when multiple 
dims\n test((2,3), (), axis=-2,\n alpha_plates=(1,3))\n test((2,3), (), axis=-2,\n alpha_plates=(3,))\n # Broadcast alpha over plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,1,3))\n test((3,), (4,5), axis=-1,\n alpha_plates=(5,3))\n\n #\n # Rotation with alpha and mu\n #\n\n # Simple\n test((1,), (), axis=-1,\n alpha_plates=(1,),\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n alpha_plates=(3,),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast mu over rotated dim\n test((3,), (), axis=-1,\n alpha_plates=(3,),\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n alpha_plates=(3,),\n mu=GaussianARD(2, 4,\n shape=(),\n plates=()))\n # Broadcast alpha over rotated dim\n test((3,), (), axis=-1,\n alpha_plates=(1,),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n test((3,), (), axis=-1,\n alpha_plates=(),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast both mu and alpha over rotated dim\n test((3,), (), axis=-1,\n alpha_plates=(1,),\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n alpha_plates=(),\n mu=GaussianARD(2, 4,\n shape=(),\n plates=()))\n # Broadcast mu over plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,1)))\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(5,)))\n # Broadcast alpha over plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,1,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,5)))\n test((3,), (4,5), axis=-1,\n alpha_plates=(5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,5)))\n # Broadcast both mu and alpha over plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,1,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,1)))\n test((3,), (4,5), axis=-1,\n alpha_plates=(5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(5,)))\n # Broadcast both mu and alpha over plates but different plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,1,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(5,)))\n test((3,), (4,5), axis=-1,\n alpha_plates=(5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,1)))\n\n #\n # Rotation with missing values\n #\n\n # TODO\n\n #\n # Plate rotation\n #\n\n # Simple\n test((2,), (3,), axis=-1, plate_axis=-1)\n test((2,), (3,4,5), axis=-1, plate_axis=-1)\n test((2,), (3,4,5), axis=-1, plate_axis=-2)\n test((2,), (3,4,5), axis=-1, plate_axis=-3)\n test((2,3), (4,5), axis=-2, plate_axis=-2)\n\n # With mu\n test((2,), (3,), axis=-1, plate_axis=-1,\n mu=GaussianARD(3, 4,\n shape=(2,),\n plates=(3,)))\n # With mu broadcasted\n test((2,), (3,), axis=-1, plate_axis=-1,\n mu=GaussianARD(3, 4,\n shape=(2,),\n plates=(1,)))\n test((2,), (3,), axis=-1, plate_axis=-1,\n mu=GaussianARD(3, 4,\n shape=(2,),\n plates=()))\n # With mu multiple plates\n test((2,), (3,4,5), axis=-1, plate_axis=-2,\n mu=GaussianARD(3, 4,\n shape=(2,),\n plates=(3,4,5)))\n # With mu multiple dims\n test((2,3,4), (5,), axis=-2, plate_axis=-1,\n mu=GaussianARD(3, 4,\n shape=(2,3,4),\n plates=(5,)))\n\n #\n # With alpha\n #\n print(\"Test: Plate rotation with alpha. Scalars.\")\n test((1,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=0)\n print(\"Test: Plate rotation with alpha. Plates.\")\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(3,1),\n mu=0)\n print(\"Test: Plate rotation with alpha. 
Dims.\")\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,3),\n mu=0)\n print(\"Test: Plate rotation with alpha. Broadcast alpha over rotated plates.\")\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=0)\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(1,),\n mu=0)\n print(\"Test: Plate rotation with alpha. Broadcast alpha over dims.\")\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=0)\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(),\n mu=0)\n print(\"Test: Plate rotation with alpha. Multiple dims.\")\n test((2,3,4,5), (6,), axis=-2, plate_axis=-1,\n alpha_plates=(6,2,3,4,5),\n mu=0)\n print(\"Test: Plate rotation with alpha. Multiple plates.\")\n test((2,), (3,4,5), axis=-1, plate_axis=-1,\n alpha_plates=(3,4,5,2),\n mu=0)\n test((2,), (3,4,5), axis=-1, plate_axis=-2,\n alpha_plates=(3,4,5,2),\n mu=0)\n test((2,), (3,4,5), axis=-1, plate_axis=-3,\n alpha_plates=(3,4,5,2),\n mu=0)\n\n #\n # With alpha and mu\n #\n print(\"Test: Plate rotation with alpha and mu. Scalars.\")\n test((1,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=(1,)))\n print(\"Test: Plate rotation with alpha and mu. Plates.\")\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(3,1),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=(3,)))\n print(\"Test: Plate rotation with alpha and mu. Dims.\")\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,3),\n mu=GaussianARD(2, 3,\n shape=(3,),\n plates=(1,)))\n print(\"Test: Plate rotation with alpha and mu. Broadcast over rotated \"\n \"plates.\")\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=(1,)))\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(1,),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=()))\n print(\"Test: Plate rotation with alpha and mu. Broadcast over dims.\")\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=(1,)))\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(),\n mu=GaussianARD(2, 3,\n shape=(),\n plates=(1,)))\n print(\"Test: Plate rotation with alpha and mu. Multiple dims.\")\n test((2,3,4,5), (6,), axis=-2, plate_axis=-1,\n alpha_plates=(6,2,3,4,5),\n mu=GaussianARD(2, 3,\n shape=(2,3,4,5),\n plates=(6,)))\n print(\"Test: Plate rotation with alpha and mu. Multiple plates.\")\n test((2,), (3,4,5), axis=-1, plate_axis=-1,\n alpha_plates=(3,4,5,2),\n mu=GaussianARD(2, 3,\n shape=(2,),\n plates=(3,4,5,)))\n test((2,), (3,4,5), axis=-1, plate_axis=-2,\n alpha_plates=(3,4,5,2),\n mu=GaussianARD(2, 3,\n shape=(2,),\n plates=(3,4,5,)))\n test((2,), (3,4,5), axis=-1, plate_axis=-3,\n alpha_plates=(3,4,5,2),\n mu=GaussianARD(2, 3,\n shape=(2,),\n plates=(3,4,5,)))\n\n # TODO: With missing values\n \n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Starts a job to load a BigQuery table from CSV
|
def load_table(bigquery, project_id, dataset_id, table_name, source_schema,
source_path, num_retries=5):
# Generate a unique job_id so retries
# don't accidentally duplicate query
job_data = {
'jobReference': {
'projectId': project_id,
            'jobId': str(uuid.uuid4())
},
'configuration': {
'load': {
'sourceUris': [source_path],
'schema': {
'fields': source_schema
},
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': table_name
}
}
}
}
return bigquery.jobs().insert(
projectId=project_id,
body=job_data).execute(num_retries=num_retries)
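A minimal usage sketch, assuming a service object built with googleapiclient.discovery and application-default credentials; the project, dataset, table, schema, and GCS path below are placeholders, and load_table itself relies on the uuid module being imported where it is defined.

# Usage sketch under the assumptions stated above; names and paths are placeholders.
import google.auth
from googleapiclient import discovery

credentials, _ = google.auth.default()
bigquery = discovery.build('bigquery', 'v2', credentials=credentials)

source_schema = [  # placeholder schema
    {'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
    {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
]

job = load_table(
    bigquery,
    project_id='my-project',                 # placeholder
    dataset_id='my_dataset',                 # placeholder
    table_name='my_table',                   # placeholder
    source_schema=source_schema,
    source_path='gs://my-bucket/data.csv',   # placeholder
)
print(job['jobReference']['jobId'])          # jobs().insert returns the created job resource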
|
[
"def load_to_gbq(filename, bq_configuration):\n # construct Client object with the path to the table in which data will be stored\n client = bigquery.Client(project = bq_configuration[\"project_id\"])\n dataset_ref = client.dataset(bq_configuration[\"dataset_id\"])\n table_ref = dataset_ref.table(bq_configuration[\"table\"])\n\n # determine uploading options\n job_config = bigquery.LoadJobConfig()\n job_config.write_disposition = 'WRITE_TRUNCATE'\n job_config.source_format = bq_configuration[\"source_format\"]\n job_config.autodetect = True\n if bq_configuration[\"source_format\"].upper() == \"CSV\":\n job_config.skip_leading_rows = 1\n\n # upload the file to BigQuery table\n with open(filename, \"rb\") as source_file:\n job = client.load_table_from_file(source_file, table_ref, location = bq_configuration[\"location\"], job_config = job_config)\n job.result()\n print(\"The Job \" + job.job_id + \" in status \" + job.state + \" for table \" + bq_configuration[\"project_id\"] + \".\" +\n bq_configuration[\"dataset_id\"] + \".\" + bq_configuration[\"table\"] + \".\")\n os.remove(filename)",
"def upload_table(self, table):\n self.open_con(fetchall=False)\n \n # download and save to temporary CSV\n self.cur.execute(\"SELECT * FROM %s\" % (table))\n tmp_file = \"/tmp/%s.csv\" % (table)\n with open(tmp_file, 'w') as out:\n csv_out = csv.writer(out, quotechar='\"', escapechar='\\\\',\n doublequote=True, quoting=csv.QUOTE_MINIMAL,\n lineterminator='\\n')\n for row in self.cur:\n csv_out.writerow(row)\n \n # upload to BQ\n file_size = os.stat(tmp_file).st_size\n file_size = float(file_size) / 1000000 # MB\n \n bq_table = \"%s.%s\" % (self.conf.get('big_query', 'db'),\n self.conf.get('big_query', table + '_table'))\n \n msg = \"Uploading %.1f MB to table '%s' on Big Query...\" \\\n % (file_size, bq_table)\n self.logger.info(msg)\n \n bq_cmd = (\"bq load --replace --source_format=CSV \"\n \"%s %s bigquery_schema/%s\")\n bq_cmd = bq_cmd % (bq_table, tmp_file, table)\n \n process = subprocess.Popen(bq_cmd.split(), stdout=subprocess.PIPE)\n \n self.logger.info(\" Done.\")\n \n self.close_con()",
"def upload_bq(bq_project, bq_dataset, table_name,gsc_schemas,bq_tmp_file,cl,bq_dataset_location,bq_check,bq_alert_empty,\n bq_alert_callback,script_file):\n\n\n # create the configuration for an upload job\n final_table_name = u\"%s.%s.%s\" % (bq_project, bq_dataset, table_name)\n jc = bigquery.LoadJobConfig()\n jc.schema = gsc_schemas\n jc.source_format = bigquery.SourceFormat.CSV\n jc.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n\n # create a job to upload the rows\n with open(bq_tmp_file, \"rb\") as f:\n\n jb = cl.load_table_from_file(f, final_table_name, location=bq_dataset_location, job_config=jc)\n\n try:\n # upload the rows\n rs = jb.result()\n print(\"Table uploaded to BQ \\n\")\n # check if the table was created successfully\n if bq_check == True:\n if not cl.get_table(final_table_name):\n if bq_alert_empty == True:\n bq_alert_callback(script_file, u\"[bq] table '%s' was not created\" % final_table_name)\n except Exception as e:\n logging.error(f\"Could not upload the table to BQ: {e}\")\n\n print(u\"ERROR: %s\" % table_name)\n\n if jb.errors:\n for i in jb.errors:\n print(u\"ERROR: %s\" % i[\"message\"])\n else:\n print(e)\n\n f.close()",
"def main():\n print_handler = logging.StreamHandler(sys.stdout)\n print_handler.setLevel(logging.DEBUG)\n fmt = '%(asctime)-15s %(levelname)s %(message)s'\n print_handler.setFormatter(logging.Formatter(fmt))\n\n job_name = getpass.getuser() + '-cli-job'\n log = logging.getLogger(job_name)\n log.setLevel(logging.DEBUG)\n log.addHandler(print_handler)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--query_file', dest='query_file', required=True,\n help=\"Path to your bigquery sql file.\")\n parser.add_argument('--gcs_destination', dest='gcs_destination', required=False,\n help=\"GCS wildcard path to write files.\", default=None)\n parser.add_argument('--gcs_export_format', dest='gcs_format', required=False,\n help=\"Format for export. CSV | AVRO | JSON\", default='CSV')\n args = parser.parse_args()\n\n bqp = BQPipeline(job_name)\n bqp.run_query((args.query_file, args.gcs_destination), gcs_format=args.gcs_format)",
"def BigQuery_Query(\n keys = None,\n dataset_name = None,\n table_name = None,\n project_id = None\n ):\n\n if keys:\n\n # Set GCP keys to env variable:\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=keys\n\n # Write a SQL query:\n SQL = \"\"\"\n SELECT * \n FROM `{}`\n \"\"\".format(dataset_name + '.' + table_name)\n\n # Execute query and save results in a pandas df:\n df = pandas_gbq.read_gbq(query=SQL)\n\n else:\n\n # Write a SQL query:\n SQL = \"\"\"\n SELECT * \n FROM `{}`\n \"\"\".format(dataset_name + '.' + table_name)\n\n # Execute query and save results in a pandas df:\n df = pandas_gbq.read_gbq(query=SQL, project_id=project_id)\n\n print(f'Imported {table_name}, Table Dimensions: {df.shape}')\n return df",
"def load_csv(\n path, field_spec, db_name, table_name,\n create_db=False, sep=\",\", headers=True\n):\n\n create_db_cmd = \"\"\"\n CREATE DATABASE IF NOT EXISTS {db_name}\n \"\"\"\n\n drop_table_cmd = \"\"\"\n DROP TABLE IF EXISTS {db_name}.{table_name}\n \"\"\"\n\n create_table_cmd = \"\"\"\n CREATE TABLE {db_name}.{table_name} ({field_spec})\n ROW FORMAT DELIMITED FIELDS TERMINATED BY \"{sep}\"\n STORED AS TEXTFILE\n \"\"\"\n\n load_table_cmd = \"\"\"\n LOAD DATA INPATH \"hdfs://{path}\"\n OVERWRITE INTO TABLE {db_name}.{table_name}\n \"\"\"\n\n try:\n __, tmp_path = tempfile.mkstemp()\n if headers:\n with open(path, 'r') as source, open(tmp_path, 'w') as target:\n # Consume the first line so it doesn't make it to the copy\n source.readline()\n copyfileobj(source, target)\n path = tmp_path\n\n # Copy the file to HDFS because pyhive runs via JDBC not off the local client\n hdfs_path = f\"{path}\"\n subprocess.run([\"hdfs\", \"dfs\", \"-mkdir\", \"-p\", hdfs_path])\n subprocess.run([\"hdfs\", \"dfs\", \"-put\", path, hdfs_path])\n\n cmd_params = {\n \"db_name\": db_name,\n \"field_spec\": field_spec,\n # To do: Convert relative paths (e.g. \"~/data.csv\") into absolute paths\n \"path\": hdfs_path,\n \"sep\": sep,\n \"table_name\": table_name\n }\n\n if create_db:\n run(create_db_cmd.format(**cmd_params))\n run([\n drop_table_cmd.format(**cmd_params),\n create_table_cmd.format(**cmd_params),\n load_table_cmd.format(**cmd_params)\n ])\n finally:\n if tmp_path:\n os.unlink(tmp_path)",
"def bq_load_shard(\n schema_folder: str,\n release_date: pendulum.DateTime,\n transform_blob: str,\n dataset_id: str,\n table_id: str,\n source_format: str,\n prefix: str = \"\",\n schema_version: str = None,\n dataset_description: str = \"\",\n **load_bigquery_table_kwargs,\n):\n _, bucket_name, data_location, schema_file_path = prepare_bq_load(\n schema_folder, dataset_id, table_id, release_date, prefix, schema_version, dataset_description\n )\n\n # Create table id\n table_id = bigquery_sharded_table_id(table_id, release_date)\n\n # Load BigQuery table\n uri = f\"gs://{bucket_name}/{transform_blob}\"\n logging.info(f\"URI: {uri}\")\n\n success = load_bigquery_table(\n uri, dataset_id, data_location, table_id, schema_file_path, source_format, **load_bigquery_table_kwargs\n )\n if not success:\n raise AirflowException()",
"def read_from_bq():\n credentials = service_account.Credentials.from_service_account_file(\n SERVICE_ACCOUNT_FILE, scopes=[\"https://www.googleapis.com/auth/cloud-platform\"])\n bq_client = bigquery.Client(credentials=credentials, project=credentials.project_id)\n #bq_client = bigquery.Client()\n query_job = bq_client.query(BQ_READ_QUERY)\n results = query_job.result()\n dataframe = results.to_dataframe()\n return dataframe",
"def load_files(gs_file_names: List[str]) -> None:\n\n partition_sql = get_partition_sql_from_file_names(gs_file_names)\n gs_file_names_string = \",\".join([ f\"'{f}'\" for f in gs_file_names])\n sql = f\"\"\"\n SELECT 1 FROM data_test.users \n WHERE {partition_sql} and _FILE_NAME in ({gs_file_names_string});\n \"\"\"\n\n # when we gets triggered, the file is uploaded but it will take some time \n # to show up in external_table, so we loop/wait for 3 minutes (18 round sleep 10s)\n loop_cnt = 0\n while (loop_cnt < 18):\n time.sleep(10)\n results = client.query(sql)\n print(f\"resuls count: {len(list(results))}\")\n if len(list(results)) > 0:\n loop_cnt = 1000\n loop_cnt += 1\n if loop_cnt < 1000: # we timed out \n print(\"timed out, the external table doesn't have the new uploaded data in GCS.\")\n return\n sql = f\"\"\"\n SELECT * FROM data_test.bq_users \n WHERE {partition_sql} and gcs_file_name in ({gs_file_names_string});\n \"\"\"\n print(sql)\n results = client.query(sql)\n print(list(results))\n if len(list(results)) > 0:\n sql = f\"\"\"\n DELETE FROM data_test.bq_users \n WHERE {partition_sql} and gcs_file_name in ({gs_file_names_string});\n \"\"\"\n print(sql)\n results = client.query(sql)\n\n sql = f\"\"\"\n INSERT INTO data_test.bq_users\n SELECT *, _FILE_NAME as gcs_file_name FROM data_test.users\n WHERE {partition_sql} and _FILE_NAME in ({gs_file_names_string});\n \"\"\"\n print(sql)\n query_job = client.query(sql)\n results = query_job.result()\n print(results)",
"def sqlite_bulk_load2(\n path: str,\n csv_file: str,\n table_name: str,\n read_csv_args: Dict = None,\n) -> None:\n db_name = Path(path).resolve()\n csv_file = Path(csv_file).resolve()\n engine = create_engine(\"sqlite:///\" + str(db_name))\n\n if \"chunksize\" in read_csv_args and read_csv_args.get(\"chunksize\") is not None:\n with pd.read_csv(csv_file, **read_csv_args) as reader:\n for chunk in reader:\n chunk.to_sql(table_name, engine, if_exists=\"append\", index=False)\n else:\n df = pd.read_csv(csv_file, **read_csv_args)\n df.to_sql(table_name, engine, if_exists=\"append\", index=False)",
"def import_csv_into_sqlite(\n csv_table_path: str,\n table_name: str,\n sqlite_db_path: str\n) -> None:\n subprocess.run(\n [\n 'sqlite3',\n '-separator',\n ',',\n sqlite_db_path,\n f\".import {csv_table_path} {table_name}\",\n ]\n )",
"def main(csv_path: str = SENSOR_CSV_PATH) -> None:\n user, pw = secrets.db.epi\n engine = sqlalchemy.create_engine(f\"mysql+pymysql://{user}:{pw}@{secrets.db.host}/{DB_NAME}\")\n for filepath, attribute in CsvImporter.find_issue_specific_csv_files(csv_path):\n if attribute is None:\n _move_after_processing(filepath, success=False)\n continue\n try:\n data = load_and_prepare_file(filepath, attribute)\n with engine.connect() as conn:\n method = _create_upsert_method(sqlalchemy.MetaData(conn))\n data.to_sql(TABLE_NAME, engine, if_exists=\"append\", method=method, index=False)\n except Exception:\n _move_after_processing(filepath, success=False)\n raise\n _move_after_processing(filepath, success=True)",
"def bq_load_shard_v2(\n schema_folder: str,\n project_id: str,\n transform_bucket: str,\n transform_blob: str,\n dataset_id: str,\n dataset_location: str,\n table_id: str,\n release_date: pendulum.Date,\n source_format: str,\n prefix: str = \"\",\n schema_version: str = None,\n dataset_description: str = \"\",\n **load_bigquery_table_kwargs,\n):\n\n schema_file_path = prepare_bq_load_v2(\n schema_folder,\n project_id,\n dataset_id,\n dataset_location,\n table_id,\n release_date,\n prefix,\n schema_version,\n dataset_description,\n )\n\n # Create table id\n table_id = bigquery_sharded_table_id(table_id, release_date)\n\n # Load BigQuery table\n uri = f\"gs://{transform_bucket}/{transform_blob}\"\n logging.info(f\"URI: {uri}\")\n\n success = load_bigquery_table(\n uri,\n dataset_id,\n dataset_location,\n table_id,\n schema_file_path,\n source_format,\n project_id=project_id,\n **load_bigquery_table_kwargs,\n )\n if not success:\n raise AirflowException()",
"def bq_load_partition(\n schema_folder: str,\n project_id: str,\n transform_bucket: str,\n transform_blob: str,\n dataset_id: str,\n dataset_location: str,\n table_id: str,\n release_date: pendulum.DateTime,\n source_format: str,\n partition_type: bigquery.TimePartitioningType,\n prefix: str = \"\",\n schema_version: str = None,\n dataset_description: str = \"\",\n partition_field: str = \"release_date\",\n **load_bigquery_table_kwargs,\n):\n\n schema_file_path = prepare_bq_load_v2(\n schema_folder,\n project_id,\n dataset_id,\n dataset_location,\n table_id,\n release_date,\n prefix,\n schema_version,\n dataset_description,\n )\n\n uri = f\"gs://{transform_bucket}/{transform_blob}\"\n\n # Include date in table id, so data in table is not overwritten\n table_id = create_date_table_id(table_id, release_date, partition_type)\n success = load_bigquery_table(\n uri,\n dataset_id,\n dataset_location,\n table_id,\n schema_file_path,\n source_format,\n project_id=project_id,\n partition=True,\n partition_field=partition_field,\n partition_type=partition_type,\n **load_bigquery_table_kwargs,\n )\n if not success:\n raise AirflowException()",
"def main(project_id,service_account_file):\n # 1. authenticate with BigQuery client\n client = authenticate_with_bigquery(project_id,service_account_file)\n\n # 2. Create dataset and table in BigQuery project if they don't exist\n destination_table = create_util_dataset_and_table(client)\n\n # 3. Scan through all datasets in a project and store stats in a destination table\n datasets = client.list_datasets()\n processing_timestamp = datetime.now()\n\n for dataset in datasets:\n dataset_id = dataset.full_dataset_id\n logging.info(\"Updating stats for : {}\".format(dataset_id))\n\n query = str.format(\"INSERT `{}` (processing_time, project_id, dataset_id, table_id, \"\n \"creation_time, last_modified_time, row_count, size_bytes) \".format(destination_table)+\n \"SELECT DATETIME '{}' as processing_time,project_id, dataset_id, table_id, \"\n \"creation_time, last_modified_time, row_count, size_bytes \"\n \"FROM `{}.__TABLES__` where type = 1\"\n , processing_timestamp,dataset_id)\n query_job = client.query(query)\n\n # shouldn't return rows because all rows are getting inserted in a table but this can be used for debugging\n for row in query_job:\n logging.info(\"{} | {} | {} | {} | {} | {} | {} | {} | {}\".format(row.processing_time\n , row.project_id\n , row.dataset_id\n , row.table_id\n , row.creation_time\n , row.last_modified_time\n , row.row_count\n , row.size_bytes\n , row.type\n )\n )",
"def bq_load_ingestion_partition(\n schema_folder: str,\n end_date: pendulum.DateTime,\n transform_blob: str,\n dataset_id: str,\n main_table_id: str,\n partition_table_id: str,\n source_format: str,\n prefix: str = \"\",\n schema_version: str = None,\n dataset_description: str = \"\",\n partition_type: bigquery.TimePartitioningType = bigquery.TimePartitioningType.DAY,\n **load_bigquery_table_kwargs,\n):\n _, bucket_name, data_location, schema_file_path = prepare_bq_load(\n schema_folder, dataset_id, main_table_id, end_date, prefix, schema_version, dataset_description\n )\n\n uri = f\"gs://{bucket_name}/{transform_blob}\"\n\n # Include date in table id, so data in table is not overwritten\n partition_table_id = create_date_table_id(partition_table_id, pendulum.today(), partition_type)\n success = load_bigquery_table(\n uri,\n dataset_id,\n data_location,\n partition_table_id,\n schema_file_path,\n source_format,\n partition=True,\n partition_type=partition_type,\n **load_bigquery_table_kwargs,\n )\n if not success:\n raise AirflowException()",
"def __init__(self, job_id, table_name, sql, context):\n super(QueryJob, self).__init__(job_id, context)\n self._sql = sql\n self._table = _query_results_table.QueryResultsTable(table_name, context, self,\n is_temporary=True)\n self._bytes_processed = None\n self._cache_hit = None\n self._total_rows = None",
"def download_csv_from_bq():\n bq_client = bigquery.Client()\n # Set the table (destination of query, source for extraction to cloud storage).\n table_ref = bq_client.dataset(config.BIGQUERY_DATASET_ID, project=config.BIGQUERY_PROJECT_NAME).table(config.BIGQUERY_TABLE_ID)\n perform_bq_query_job(bq_client)\n perform_bq_extract_job(bq_client, table_ref)\n\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(config.CLOUD_STORAGE_BUCKET_NAME)\n file_indices = download_csv_from_gcs(storage_client, bucket)\n return file_indices",
"def get_data(nrows=1_000):\n df = pd.read_csv(f\"gs://{BUCKET_NAME}/{BUCKET_TRAIN_DATA_PATH}\", nrows=nrows)\n return df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Column that adds a foreign key reference to another table's primary key.
|
def ReferenceCol(tablename, nullable=False, pk_name='id', **kwargs):
return db.Column(
db.ForeignKey("{0}.{1}".format(tablename, pk_name)),
nullable=nullable, **kwargs)
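A brief usage sketch, assuming a Flask-SQLAlchemy db instance configured elsewhere; the User and Post models are hypothetical. Because no column type is given, SQLAlchemy infers it from the referenced primary key.

# Hypothetical models illustrating the helper; assumes `db = SQLAlchemy(app)` exists elsewhere.
class User(db.Model):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)


class Post(db.Model):
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    user_id = ReferenceCol('users', nullable=False)   # FOREIGN KEY (user_id) REFERENCES users (id)
    user = db.relationship('User', backref='posts')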
|
[
"def reference_col(tablename, nullable=False, pk_name='id', ondelete='CASCADE', **kwargs):\n return db.Column(db.ForeignKey(f'{tablename}.{pk_name}', ondelete=ondelete),\n nullable=nullable, **kwargs)",
"def foreign_key(self: Fdef) -> Optional[str]:\n self._resolve_if_needed()\n return self._foreign_key",
"def _foreign_key_sql(self, from_table_name, from_column_name, to_table_name, to_column_name):\n constraint_name = \"%s_refs_%s_%x\" % (\n from_column_name,\n to_column_name,\n abs(hash((from_table_name, to_table_name))),\n )\n return \"ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;\" % (\n self.quote_name(from_table_name),\n self.quote_name(truncate_name(constraint_name, connection.ops.max_name_length())),\n self.quote_name(from_column_name),\n self.quote_name(to_table_name),\n self.quote_name(to_column_name),\n connection.ops.deferrable_sql(), # Django knows this\n )",
"def join_table_referee_key(self: Fdef) -> Optional[str]:\n self._resolve_if_needed()\n return self._join_table_referee_key",
"def foreign_key(self) -> bytes:\n fk = bytearray(29)\n for i in range(0, 28):\n fk[i] |= self.val[i + 3] >> 2\n fk[i] |= (self.val[i + 4] & 0x3) << 6\n\n fk[28] = self.val[31] >> 2\n\n return bytes(fk)",
"def primary_key(self, name):\n meta = self.model._meta\n pk_field = peewee.CompositeKey([name])\n meta.primary_key = pk_field\n meta.add_field(name, pk_field)\n\n field = peewee.AutoField(column_name=name)\n meta.add_field(name, field)",
"def _pk_constraint(self, table, column, status):\n if isinstance(column, basestring):\n column = getattr(table.c, name)\n\n ret = constraint.PrimaryKeyConstraint(*table.primary_key)\n if status:\n # Created PK\n ret.c.append(column)\n else:\n # Dropped PK\n names = [c.name for c in cons.c]\n index = names.index(col.name)\n del ret.c[index]\n\n # Allow explicit PK name assignment\n if isinstance(pk, basestring):\n ret.name = pk\n return ret",
"def join_table_referrer_key(self: Fdef) -> Optional[str]:\n self._resolve_if_needed()\n return self._join_table_referrer_key",
"def insert_fkey(self, foreignkey, rowcol):\n fk_key, fk_field = foreignkey\n if fk_key and fk_field and rowcol != '':\n # Allow users to specify app label for fk model if they want\n if fk_key.find(\".\") > -1:\n new_app_label, fk_key = fk_key.split(\".\")\n else:\n try:\n new_app_label = ContentType.objects.get(model=fk_key).app_label\n except:\n new_app_label = self.app_label\n fk_model = get_model(new_app_label, fk_key)\n matches = fk_model.objects.filter(**{fk_field + \"__exact\": rowcol})\n\n if not matches:\n key = fk_model()\n key.__setattr__(fk_field, rowcol)\n key.save()\n\n rowcol = fk_model.objects.filter(**{fk_field + \"__exact\": rowcol})[0]\n return rowcol",
"def get_key_sql(self, table, column):\n sql = []\n\n # add foreign keys\n for fkey in column.foreign_keys:\n actions = self.get_foreign_key_action_sql(fkey)\n sql.append(\n 'ALTER TABLE {} ADD CONSTRAINT {} FOREIGN KEY ({}) REFERENCES {}({}) {} {};'.format(\n table,\n fkey.name.replace('.', '__'),\n column.name,\n fkey.target_fullname.split('.')[0],\n fkey.target_fullname.split('.')[1],\n actions['on_delete'],\n actions['on_update']\n )\n )\n\n # add primary key if needed\n if column.primary_key:\n sql.append('ALTER TABLE {} ADD PRIMARY KEY({});'.format(\n table,\n column.compile(dialect=self.engine.dialect)\n ))\n\n return sql",
"def is_foreign_key(self, is_foreign_key):\n\n self._is_foreign_key = is_foreign_key",
"def fkcolumns(self):\n return self._fkcolumns",
"def primary_key(self, *args):\n return _fluent(self, '_primary_key', *args)",
"def test_add_table_with_foreign_key(self):\n # Setup\n metadata = Mock(spec_set=Metadata)\n metadata.get_tables.return_value = ['a_table', 'b_table']\n metadata._metadata = {'tables': dict()}\n\n # Run\n Metadata.add_table(metadata, 'x_table', parent='users')\n\n # Asserts\n expected_table_meta = {\n 'fields': dict()\n }\n\n assert metadata._metadata['tables']['x_table'] == expected_table_meta\n\n metadata.set_primary_key.call_count == 0\n metadata.add_relationship.assert_called_once_with('users', 'x_table', None)",
"def _handle_primary_key(mapping, fields):\n\n mapping[\"oid_as_pk\"] = bool(mapping.get(\"fields\", {}).get(\"Id\"))\n if mapping[\"oid_as_pk\"]:\n id_column = mapping[\"fields\"][\"Id\"]\n fields.append(Column(id_column, Unicode(255), primary_key=True))\n else:\n fields.append(Column(\"id\", Integer(), primary_key=True, autoincrement=True))",
"def makeFootnoteRefId(self, id):\r\n return 'fnref:%s' % id",
"def foreign_key_to(table: sa.Table,\n prefix='entity',\n **opts) -> typing.Iterable[sa.Column]: # pylint: disable=unsubscriptable-object\n for pk in table.primary_key:\n name = '%s_%s' % (prefix, pk.name)\n yield sa.Column(name, pk.type, sa.ForeignKey(pk), **opts)",
"def pk(self, table, owner=None, schema=None):\r\n cur = self.begin()\r\n cur.primarykeys(schema, owner, table)\r\n self.commit(cur)\r\n self.display()",
"def alternative_id_col(self):\n return self._alternative_id_col",
"def _closure_parent_pk(self):\n if hasattr(self, \"%s_id\" % self._closure_parent_attr):\n return getattr(self, \"%s_id\" % self._closure_parent_attr)\n else:\n parent = getattr(self, self._closure_parent_attr)\n return parent.pk if parent else None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a Microsoft Graph list response (a collection of objects), when calling parse_list(), then validate the output parsing.
|
def test_parse_list():
with open('test_data/risk_detections_response.json') as f:
response = json.load(f)
human_readable_title = "Risks"
context_path = "Risks_path"
parsed = parse_list(response, human_readable_title=human_readable_title, context_path=context_path)
outputs = parsed.outputs
assert len(outputs) == 2
values = outputs[f'AADIdentityProtection.{context_path}(val.id === obj.id)'][0]
assert len(values) == len(response['value'][0]) # all fields parsed
next_link_dict = outputs[f'AADIdentityProtection.NextLink(obj.Description === "{context_path}")']
assert next_link_dict == {'Description': context_path,
'URL': 'https://graph.microsoft.com/beta/riskDetections?$skiptoken=dummy_skip_token'}
assert parsed.readable_output.startswith("### Risks (1 result)")
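For orientation, a hedged sketch of the response shape such a fixture presumably follows: Microsoft Graph list endpoints return items under 'value' and a continuation URL under '@odata.nextLink', which is what the NextLink context entry checked above captures. The item ids, event types, and timestamps below are illustrative placeholders, not the actual fixture contents.

# Illustrative shape only; field values are placeholders.
example_graph_list_response = {
    "@odata.context": "https://graph.microsoft.com/beta/$metadata#riskDetections",
    "@odata.nextLink": "https://graph.microsoft.com/beta/riskDetections?$skiptoken=dummy_skip_token",
    "value": [
        {
            "id": "risk-1",
            "riskEventType": "unfamiliarFeatures",
            "detectedDateTime": "2021-07-15T10:00:00.0000000Z",
        },
    ],
}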
|
[
"def test_list_cast(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n entire_list = list(self.plist)\n self.assertEqual(entire_list, list(range(self.total)))\n self.assertEqual(len(responses.calls), self.lazy_pages(self.total-1))",
"def test_withListCompleted(self):\n self.assertWellFormedRequest({\"listCompleted\": True})",
"def test_ok_list_result(self):\n process_result = process_response(self.resp_ok_list, is_detail=False)\n self.assertEqual(process_result[\"result\"], 0)",
"def test_result_list_has_annotations(self):\n response = self.get_response(self.changelist_path)\n self.assertEqual(response.status_code, 200)\n self.assertIn('alias_list', response.context['cl'].result_list.query.annotations)",
"def test_get_list(self):\n\t\tinput = get_list('./tests/sample.json')\n\t\tassert isinstance(input, list)",
"def test_result_list_annotated_values(self):\n response = self.get_response(self.changelist_path)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n [\"B.R.M.C.\", \"BRMC\"],\n response.context['cl'].result_list[0].alias_list\n )",
"def test_item_list():\n\n jis = serializer.JSONLDFeedSerializer(\n './tests/files/test_jsonld_item_list_out.json',\n feed_type='ItemList')\n\n for i in range(5):\n mv = schema.Movie()\n mv.name.add().text = 'Movie ' + str(i + 1)\n mv.id = 'Id of Movie ' + str(i + 1)\n for j in range(3):\n actor = mv.actor.add().person\n actor.name.add().text = 'Actor ' + str(j + 1)\n jis.add_item(mv, schema)\n\n jis.close()\n\n with open('./tests/files/test_jsonld_item_list_out.json') as f:\n output = json.load(f)\n\n with open('./tests/files/test_jsonld_item_list.json') as f:\n expected = json.load(f)\n\n os.remove('./tests/files/test_jsonld_item_list_out.json')\n\n assert output == expected, 'Error in Serialization of ItemList.'",
"def test_validator_itemlist():\n\n v = validator.SchemaValidator(\n './tests/files/validator_constraints.ttl',\n './tests/files/test_report.html')\n\n with open('./tests/files/validator_item_list.json') as f:\n dump = json.load(f)\n\n v.add_entity(dump)\n v.close()\n os.remove('./tests/files/test_report.html')\n\n expected = []\n expected.append(utils.ResultRow(\n 'Id: movieid1',\n 'Name of movie must be string.',\n '.name',\n '123',\n 'Violation'\n ))\n expected.append(utils.ResultRow(\n 'Id: id2',\n 'Name of person must be string.',\n '.actor.name',\n '123',\n 'Warning'\n ))\n expected.append(utils.ResultRow(\n 'Id: id3',\n 'Name of organization must be string.',\n '.creator.url',\n '345',\n 'Info'\n ))\n expected.append(utils.ResultRow(\n 'Id: id3',\n 'Name of person must be string.',\n '.creator.name',\n '123',\n 'Warning'\n ))\n\n assert(len(v.reports['Movie']) == len(expected)\n ), 'Expected report count not equal.'\n\n for m in expected:\n assert m in v.reports['Movie'], 'Expected report not generated.'",
"def test_get_list(self):\n #Get and verify the resp\n resp = self.client.get('/api/v1/acknowledgement/')\n self.assertEqual(resp.status_code, 200, msg=resp)\n\n #Verify the data sent\n resp_obj = resp.data\n self.assertIsNotNone(resp_obj['results'])\n self.assertEqual(len(resp_obj['results']), 1)\n self.assertEqual(len(resp_obj['results'][0]['items']), 2)",
"def parse_list(cls, data, **kwargs):\n results = ResultSet()\n data = data or []\n for obj in data:\n if obj:\n results.append(cls.parse(obj, **kwargs))\n return results",
"def test_is_list_true(test_rlp_reader_contract):\n contract = test_rlp_reader_contract\n rlp_encoded_item = rlp.encode([1, 2, 3])\n\n assert contract.functions.testIsList(rlp_encoded_item).call() is True",
"def transform_list_response(response):\n result = []\n root = ElementTree.fromstring(response.content)\n for item in root:\n result.append(item.text)\n return result",
"def parse_investor_list(self, response: Response):\n pass",
"def test_0010(self):\n mock_object_format_list.init(settings.CN_RESPONSES_BASE_URL)\n self.assertIsInstance(\n self.client.listFormats(),\n d1_common.types.generated.dataoneTypes_v2_0.ObjectFormatList,\n )",
"def test_ok_list_returned_tickets(self):\n process_result = process_response(self.resp_ok_list, is_detail=False)\n self.assertEqual(process_result[\"detail\"], self.sample_ok_list)",
"def test_renderer_works_correctly_with_return_list(self):\n test_list = [{\"1\": 1}]\n rendered = self.renderer.render(\n data=ReturnList(test_list, serializer=None),\n media_type=\"application/json\",\n renderer_context={},\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, test_list)",
"def parse_list_parts(data):\n return ListPartsResult(S3Element.fromstring(\"ListPartsResult\", data))",
"def test_group_by_params_string_list_fields(self):\n group_params = {\"node\": \"localhost\"}\n serializer = GroupBySerializer(data=group_params)\n validation = serializer.is_valid()\n self.assertTrue(validation)\n node_result = serializer.data.get(\"node\")\n self.assertIsInstance(node_result, list)",
"def test_list(self):\n response = self.client.get(\"/api_musculib/declination/\")\n assert len(response.data) > 0 and response.status_code == 200",
"def test_unfollow_collection_list_get(self):\n token = Token.objects.get(user__username='test_user')\n url = \"/api/unfollow/collectionList/\"\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n response = client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given an empty Microsoft Graph list response, when calling parse_list(), then validate the output parsing.
|
def test_parse_list_empty():
empty_response = dict()
human_readable_title = "Risks"
context_path = "Risks_path"
parsed = parse_list(empty_response, human_readable_title=human_readable_title, context_path=context_path)
outputs = parsed.outputs
assert outputs == {f'AADIdentityProtection.{context_path}(val.id === obj.id)': []} # no next_link
assert f"{human_readable_title} (0 results)" in parsed.readable_output
assert "**No entries.**" in parsed.readable_output
|
[
"def test_list_cast(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n entire_list = list(self.plist)\n self.assertEqual(entire_list, list(range(self.total)))\n self.assertEqual(len(responses.calls), self.lazy_pages(self.total-1))",
"def test_withListCompleted(self):\n self.assertWellFormedRequest({\"listCompleted\": True})",
"def test_ok_list_result(self):\n process_result = process_response(self.resp_ok_list, is_detail=False)\n self.assertEqual(process_result[\"result\"], 0)",
"def test_result_list_has_annotations(self):\n response = self.get_response(self.changelist_path)\n self.assertEqual(response.status_code, 200)\n self.assertIn('alias_list', response.context['cl'].result_list.query.annotations)",
"def test_get_list(self):\n\t\tinput = get_list('./tests/sample.json')\n\t\tassert isinstance(input, list)",
"def test_result_list_annotated_values(self):\n response = self.get_response(self.changelist_path)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n [\"B.R.M.C.\", \"BRMC\"],\n response.context['cl'].result_list[0].alias_list\n )",
"def test_item_list():\n\n jis = serializer.JSONLDFeedSerializer(\n './tests/files/test_jsonld_item_list_out.json',\n feed_type='ItemList')\n\n for i in range(5):\n mv = schema.Movie()\n mv.name.add().text = 'Movie ' + str(i + 1)\n mv.id = 'Id of Movie ' + str(i + 1)\n for j in range(3):\n actor = mv.actor.add().person\n actor.name.add().text = 'Actor ' + str(j + 1)\n jis.add_item(mv, schema)\n\n jis.close()\n\n with open('./tests/files/test_jsonld_item_list_out.json') as f:\n output = json.load(f)\n\n with open('./tests/files/test_jsonld_item_list.json') as f:\n expected = json.load(f)\n\n os.remove('./tests/files/test_jsonld_item_list_out.json')\n\n assert output == expected, 'Error in Serialization of ItemList.'",
"def test_validator_itemlist():\n\n v = validator.SchemaValidator(\n './tests/files/validator_constraints.ttl',\n './tests/files/test_report.html')\n\n with open('./tests/files/validator_item_list.json') as f:\n dump = json.load(f)\n\n v.add_entity(dump)\n v.close()\n os.remove('./tests/files/test_report.html')\n\n expected = []\n expected.append(utils.ResultRow(\n 'Id: movieid1',\n 'Name of movie must be string.',\n '.name',\n '123',\n 'Violation'\n ))\n expected.append(utils.ResultRow(\n 'Id: id2',\n 'Name of person must be string.',\n '.actor.name',\n '123',\n 'Warning'\n ))\n expected.append(utils.ResultRow(\n 'Id: id3',\n 'Name of organization must be string.',\n '.creator.url',\n '345',\n 'Info'\n ))\n expected.append(utils.ResultRow(\n 'Id: id3',\n 'Name of person must be string.',\n '.creator.name',\n '123',\n 'Warning'\n ))\n\n assert(len(v.reports['Movie']) == len(expected)\n ), 'Expected report count not equal.'\n\n for m in expected:\n assert m in v.reports['Movie'], 'Expected report not generated.'",
"def test_get_list(self):\n #Get and verify the resp\n resp = self.client.get('/api/v1/acknowledgement/')\n self.assertEqual(resp.status_code, 200, msg=resp)\n\n #Verify the data sent\n resp_obj = resp.data\n self.assertIsNotNone(resp_obj['results'])\n self.assertEqual(len(resp_obj['results']), 1)\n self.assertEqual(len(resp_obj['results'][0]['items']), 2)",
"def parse_list(cls, data, **kwargs):\n results = ResultSet()\n data = data or []\n for obj in data:\n if obj:\n results.append(cls.parse(obj, **kwargs))\n return results",
"def test_is_list_true(test_rlp_reader_contract):\n contract = test_rlp_reader_contract\n rlp_encoded_item = rlp.encode([1, 2, 3])\n\n assert contract.functions.testIsList(rlp_encoded_item).call() is True",
"def transform_list_response(response):\n result = []\n root = ElementTree.fromstring(response.content)\n for item in root:\n result.append(item.text)\n return result",
"def parse_investor_list(self, response: Response):\n pass",
"def test_0010(self):\n mock_object_format_list.init(settings.CN_RESPONSES_BASE_URL)\n self.assertIsInstance(\n self.client.listFormats(),\n d1_common.types.generated.dataoneTypes_v2_0.ObjectFormatList,\n )",
"def test_ok_list_returned_tickets(self):\n process_result = process_response(self.resp_ok_list, is_detail=False)\n self.assertEqual(process_result[\"detail\"], self.sample_ok_list)",
"def test_renderer_works_correctly_with_return_list(self):\n test_list = [{\"1\": 1}]\n rendered = self.renderer.render(\n data=ReturnList(test_list, serializer=None),\n media_type=\"application/json\",\n renderer_context={},\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, test_list)",
"def parse_list_parts(data):\n return ListPartsResult(S3Element.fromstring(\"ListPartsResult\", data))",
"def test_group_by_params_string_list_fields(self):\n group_params = {\"node\": \"localhost\"}\n serializer = GroupBySerializer(data=group_params)\n validation = serializer.is_valid()\n self.assertTrue(validation)\n node_result = serializer.data.get(\"node\")\n self.assertIsInstance(node_result, list)",
"def test_list(self):\n response = self.client.get(\"/api_musculib/declination/\")\n assert len(response.data) > 0 and response.status_code == 200",
"def test_unfollow_collection_list_get(self):\n token = Token.objects.get(user__username='test_user')\n url = \"/api/unfollow/collectionList/\"\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n response = client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Mocks the API request that lists detections. The mock manually applies the filter and limit supplied as parameters. It also accepts user_id and user_principal_name so that fetch can run end to end, since the real function receives these parameters.
|
def mock_list_detections(limit, filter_expression, user_id, user_principal_name):
from AzureADIdentityProtection import DATE_FORMAT, date_str_to_azure_format
test_incidents = util_load_json('test_data/incidents.json')
all_possible_results = test_incidents.get('value')
start_time = filter_expression.split('gt ')[-1]
start_time = date_str_to_azure_format(start_time)
start_time_datetime = datetime.strptime(start_time, DATE_FORMAT)
incidents_compliant_with_filter = []
for detection in all_possible_results:
detection_time = date_str_to_azure_format(detection['detectedDateTime'])
detection_datetime = datetime.strptime(detection_time, DATE_FORMAT)
if detection_datetime > start_time_datetime:
incidents_compliant_with_filter.append(detection)
incidents_compliant_with_limit = incidents_compliant_with_filter[:limit]
res = {
'value': incidents_compliant_with_limit
}
return res
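A short sketch of exercising the mock directly; the filter string mirrors the "... gt <timestamp>" form the mock splits on, and the timestamp is a placeholder whose exact accepted format depends on date_str_to_azure_format.

# Direct illustration of the mock's filter/limit handling; the timestamp is a placeholder.
res = mock_list_detections(
    limit=1,
    filter_expression="detectedDateTime gt 2021-07-15T10:00:00.0000000Z",
    user_id=None,
    user_principal_name=None,
)
assert len(res['value']) <= 1   # at most `limit` detections newer than the filter timestamp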
|
[
"def test_fetch_detections_success_when_detections_equal_to_max_fetch(requests_mock):\n incidents = load_mock_response(\"mock_incidents.json\")\n\n requests_mock.get(\n f\"{BASE_URL}/api/v1/extrahop/version\", json={\"version\": \"9.3.0.1319\"}\n )\n\n mock_response = load_mock_response(\"fetch_detections_success.json\")\n requests_mock.post(f\"{BASE_URL}/api/v1/detections/search\", json=mock_response)\n\n mock_device_data = load_mock_response(\"mock_device_data.json\")\n requests_mock.get(f\"{BASE_URL}/api/v1/devices/1904\", json=mock_device_data)\n\n client = init_mock_client(requests_mock, on_cloud=False)\n actual_incidents, next_run = ExtraHop_v2.fetch_incidents(client, {}, {}, False)\n\n assert next_run[\"offset\"] == 1\n assert actual_incidents[0][\"name\"] == incidents[0][\"name\"]\n assert actual_incidents[0][\"occurred\"] == incidents[0][\"occurred\"]\n assert actual_incidents[0][\"rawJSON\"] == json.dumps(incidents[0][\"rawJSON\"])",
"def test_list_detections_command_using_advanced_filter(requests_mock):\n requests_mock.get(\n f\"{BASE_URL}/api/v1/extrahop/version\", json={\"version\": \"9.3.0.1319\"}\n )\n client = init_mock_client(requests_mock, on_cloud=False)\n args = {}\n advanced_filter = \"\"\"{\\\"filter\\\": {\\\"categories\\\": [\\\"sec.attack\\\"],\\\"risk_score_min\\\": 51},\n \\\"limit\\\": 1,\\\"offset\\\": 0,\n \\\"sort\\\": [\n {\n \\\"direction\\\": \\\"desc\\\",\n \\\"field\\\": \\\"end_time\\\"\n }\n ]\n }\"\"\"\n\n response = load_mock_response(LIST_DETECTIONS_SUCCESS)\n\n expected_hr = load_file(\"list_detections_success_hr.md\")\n\n requests_mock.post(f\"{BASE_URL}/api/v1/detections/search\", json=response)\n\n results = ExtraHop_v2.detections_list_command(\n client, args, json.loads(advanced_filter)\n )\n assert results.readable_output == expected_hr",
"def test_list_images_param_limit(self):\n params = {\"limit\": 1}\n images_list = self.client.list_images(params=params)['images']\n\n self.assertEqual(len(images_list), params['limit'],\n \"Failed to get images by limit\")",
"def test_view_only_own_filters(self):\n new_user = User.objects.create_user('test2', 'test2@test.com', 'test2')\n SavedFilter.objects.create(filter_json={\"key\": \"value\"}, label=\"A label\", owner=new_user)\n\n # self.user doesn't have any filters, expect 0\n response = self.client.get(self.list_url)\n self.assertEqual(len(json.loads(response.content)['results']), 0)\n\n # New user has a filter, expect 1\n new_client = APIClient()\n new_client.force_authenticate(user=new_user)\n response = new_client.get(self.list_url)\n self.assertEqual(len(json.loads(response.content)['results']), 1)",
"async def test_get_slice_limit(con, mocker):\n run_fetches_mock = mocker.patch.object(\n AircallConnector, 'run_fetches', return_value=[filtered_teams, filtered_calls]\n )\n ds = build_ds('calls')\n df, rows = con.get_slice(ds, limit=2)\n assert run_fetches_mock.call_count == 1\n assert df.shape == (2, 10)\n assert list(df.columns) == columns_for_calls\n assert df['team'].isna().sum() == 0\n assert df['team'].eq('Team 1').sum() == 2\n assert df['team'].eq('Team 2').sum() == 0",
"def test_self_mailers_list_with_limit_param(self):\n self.mock_api.self_mailers_list = self.mock_list_of_self_mailers\n self_mailers = self.mock_api.self_mailers_list(limit=10)\n self.assertIsNotNone(self_mailers)\n self.assertEqual(len(self_mailers[\"data\"]), 2)",
"def test_get_with_filter_person_factoid(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062&p=P00063\")\n assert r.status_code == 200\n assert r.json[\"factoids\"][0][\"@id\"] == \"F00062\"\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062&p=P00064\")\n assert r.status_code == 404",
"def test_list_detections_command_successful_execution_without_category(on_cloud, requests_mock):\n requests_mock.get(\n f\"{BASE_URL}/api/v1/extrahop/version\", json={\"version\": \"9.3.0.1319\"}\n )\n client = init_mock_client(requests_mock, on_cloud)\n args = {\n \"limit\": \"2\",\n \"filter\": \"\"\"{\n \\\"risk_score_min\\\": 51\n }\"\"\",\n \"from\": \"1573500360001\",\n \"offset\": \"2\",\n \"sort\": \"end_time asc,id desc\",\n \"until\": \"1673569370001\",\n }\n response = load_mock_response(LIST_DETECTIONS_SUCCESS)\n\n expected_hr = load_file(\"list_detections_success_hr.md\")\n\n requests_mock.post(f\"{BASE_URL}/api/v1/detections/search\", json=response)\n\n results = ExtraHop_v2.detections_list_command(client, args)\n\n assert results.readable_output == expected_hr\n assert results.outputs_prefix == \"ExtraHop.Detections\"",
"def test_fetch_limit_when_valid_value_success(mocker):\n from FireEyeNX import get_fetch_limit\n\n mocker.patch.object(demisto, 'params', return_value=PARAMS)\n\n fetch_limit = get_fetch_limit(fetch_limit='')\n assert fetch_limit == 50",
"def testListDisks_Filter(self):\n mock_api = self.mock_api_build.return_value\n mock_disk_list = mock_api.disks.return_value.list\n\n mock_disk_list.return_value.execute.return_value = {\n 'items': ['dummy', 'list']\n }\n\n disk_list = self.gce_api.ListDisks('filter condition')\n\n mock_disk_list.assert_called_once_with(\n project='project-name', zone='zone-name', filter='filter condition')\n mock_disk_list.return_value.execute.assert_called_once_with()\n self.assertEqual(['dummy', 'list'], disk_list)",
"def test_filtering_catalogues_by_users(self):\n self._verify_user_catalogues(self.mock_user_1, self.how_many_catalogues)\n self._verify_user_catalogues(self.mock_user_2, 5)",
"def test_search_users(self):\n i = self.instance.search_users(\"tom repos:>42 followers:>1000\")\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"search/users\"),\n params={\"per_page\": 100, \"q\": \"tom repos:>42 followers:>1000\"},\n headers={},\n )",
"def test_photo_edit_get_queryset_list_all_users_photos(self):\n from imager_images.views import PhotoEditView\n request = self.request.get('')\n request.user = self.bob\n view = PhotoEditView(request=request)\n photos = view.get_queryset()\n self.assertEqual(photos.count(), 15)",
"async def jsonrpc_claim_search(self, **kwargs):\n if \"claim_ids\" in kwargs and not kwargs[\"claim_ids\"]:\n kwargs.pop(\"claim_ids\")\n if {'claim_id', 'claim_ids'}.issubset(kwargs):\n raise ConflictingInputValueError('claim_id', 'claim_ids')\n if kwargs.pop('valid_channel_signature', False):\n kwargs['signature_valid'] = 1\n if kwargs.pop('invalid_channel_signature', False):\n kwargs['signature_valid'] = 0\n if 'has_no_source' in kwargs:\n kwargs['has_source'] = not kwargs.pop('has_no_source')\n if 'order_by' in kwargs: # TODO: remove this after removing support for old trending args from the api\n value = kwargs.pop('order_by')\n value = value if isinstance(value, list) else [value]\n new_value = []\n for new_v in value:\n migrated = new_v if new_v not in (\n 'trending_mixed', 'trending_local', 'trending_global', 'trending_group'\n ) else 'trending_score'\n if migrated not in new_value:\n new_value.append(migrated)\n kwargs['order_by'] = new_value\n page_num, page_size = abs(kwargs.pop('page', 1)), min(abs(kwargs.pop('page_size', DEFAULT_PAGE_SIZE)), 50)\n wallet = self.wallet_manager.get_wallet_or_default(kwargs.pop('wallet_id', None))\n kwargs.update({'offset': page_size * (page_num - 1), 'limit': page_size})\n txos, blocked, _, total = await self.ledger.claim_search(wallet.accounts, **kwargs)\n result = {\n \"items\": txos,\n \"blocked\": blocked,\n \"page\": page_num,\n \"page_size\": page_size\n }\n if not kwargs.pop('no_totals', False):\n result['total_pages'] = int((total + (page_size - 1)) / page_size)\n result['total_items'] = total\n return result",
"def test_accept_vin_query_param_to_filter(self):",
"def test_list_detections_command_successful_execution_with_category(on_cloud, requests_mock):\n requests_mock.get(\n f\"{BASE_URL}/api/v1/extrahop/version\", json={\"version\": \"9.3.0.1319\"}\n )\n client = init_mock_client(requests_mock, on_cloud)\n args = {\n \"limit\": \"2\",\n \"filter\": \"\"\"{\n \\\"category\\\": \\\"sec.attack\\\",\n \\\"risk_score_min\\\": 51\n }\"\"\",\n \"from\": \"1573500360001\",\n \"offset\": \"2\",\n \"sort\": \"end_time asc,id desc\",\n \"until\": \"1673569370001\",\n }\n response = load_mock_response(LIST_DETECTIONS_SUCCESS)\n\n expected_hr = load_file(\"list_detections_success_hr.md\")\n\n requests_mock.post(f\"{BASE_URL}/api/v1/detections/search\", json=response)\n\n results = ExtraHop_v2.detections_list_command(client, args)\n\n assert results.readable_output == expected_hr\n assert results.outputs_prefix == \"ExtraHop.Detections\"",
"def testListDisks_NoFilter(self):\n mock_api = self.mock_api_build.return_value\n mock_disk_list = mock_api.disks.return_value.list\n\n mock_disk_list.return_value.execute.return_value = {\n 'items': ['dummy', 'list']\n }\n\n disk_list = self.gce_api.ListDisks()\n\n mock_disk_list.assert_called_once_with(\n project='project-name', zone='zone-name', filter=None)\n mock_disk_list.return_value.execute.assert_called_once_with()\n self.assertEqual(['dummy', 'list'], disk_list)",
"def test_get_user_profile(self):\n access_token = dict(urllib.parse.parse_qsl(self.access_token))\n selectors = [\n BasicProfileSelectors.ID, BasicProfileSelectors.FIRST_NAME,\n BasicProfileSelectors.LAST_NAME,\n BasicProfileSelectors.LOCATION, FullProfileSelectors.DATE_OF_BIRTH,\n BasicProfileSelectors.PUBLIC_PROFILE_URL,\n BasicProfileSelectors.PICTURE_URL,\n BasicProfileSelectors.SITE_STANDARD_PROFILE_REQUEST,\n BasicProfileSelectors.TWITTER_ACCOUNTS, BasicProfileSelectors.SUMMARY,\n BasicProfileSelectors.MAIN_ADDRESS\n ]\n\n with patch('linkedin_json_client.api.oauth.Client') as patched_Client:\n # test a successful request without selectors\n data = { # this was an actual response from API\n \"firstName\": \"John\",\n \"headline\": \"Tester at Product Testing\",\n \"lastName\": \"Smith\",\n \"siteStandardProfileRequest\": {\n \"url\": \"http://www.linkedin.com/profile?viewProfile=\"\n \"&key=169364659&authToken=idE-&authType=name\"\n \"&trk=api*a165186*s173442*\"}\n }\n data_str = simplejson.dumps(data)\n\n client = patched_Client.return_value\n client.request.return_value = (\n self._responseFactoryAPI({\n 'content-length': '248',\n 'content-location':\n 'https://api.linkedin.com/v1/people/~?oauth_body_hash'\n '=2jmj7l5rSw0yVb%2FvlWAYkK%2FYBwk%3D&oauth_nonce='\n '4922800&oauth_timestamp=1351725442&'\n 'oauth_consumer_key=q00tdja3bfzo&format=json&'\n 'oauth_signature_method=HMAC-SHA1&oauth_version=1.0&'\n 'oauth_token=ef0bfbcc-1144-4c5d-a73b-b40c26605da2&'\n 'oauth_signature=GLkQMUFaheMJ8y%2Bg2PRomHe8J6I%3D',\n }),\n data_str)\n profile = self.api.get_user_profile(access_token)\n self.failUnlessEqual(profile, data)\n\n # test a successful request with selectors\n data = { # this was an actual response from API\n \"firstName\": \"John\",\n \"id\": \"OAwW7wk0xl\",\n \"lastName\": \"Smith\",\n \"location\": {\n \"country\": {\"code\": \"us\"},\n \"name\": \"San Francisco Bay Area\"\n },\n \"publicProfileUrl\": \"http://www.linkedin.com/pub/john-smith/\"\n \"48/877/b57\",\n \"siteStandardProfileRequest\": {\n \"url\": \"http://www.linkedin.com/profile?viewProfile=\"\n \"&key=169364659&authToken=idE-&authType=name\"\n \"&trk=api*a165186*s173442*\"\n },\n \"twitterAccounts\": {\"_total\": 0}\n }\n data_str = simplejson.dumps(data)\n\n client = patched_Client.return_value\n client.request.return_value = (\n self._responseFactoryAPI({\n 'content-length': '427',\n 'content-location':\n 'https://api.linkedin.com/v1/people/~:(id,first-name,'\n 'last-name,location,date-of-birth,public-profile-url,'\n 'picture-url,site-standard-profile-request,'\n 'twitter-accounts,summary,main-address)?'\n 'oauth_body_hash=2jmj7l5rSw0yVb%2FvlWAYkK%2FYBwk%3D&'\n 'oauth_nonce=18576507&oauth_timestamp=1351720755&'\n 'oauth_consumer_key=q00tdja3bfzo&format=json&'\n 'oauth_signature_method=HMAC-SHA1&oauth_version=1.0&'\n 'oauth_token=ef0bfbcc-1144-4c5d-a73b-b40c26605da2&'\n 'oauth_signature=ZSNPv8LmiuTtmE1ON%2F0kR0K1r6Y%3D',\n }),\n data_str)\n profile = self.api.get_user_profile(\n access_token, selectors=selectors)\n self.failUnlessEqual(profile, data)\n\n # smoke test that names from actual request match constants\n self.failUnlessEqual(\n profile[BasicProfileFields.FIRST_NAME],\n data[BasicProfileFields.FIRST_NAME])\n self.failUnlessEqual(\n profile[BasicProfileFields.LAST_NAME],\n data[BasicProfileFields.LAST_NAME])\n self.failUnlessEqual(\n profile[BasicProfileFields.ID],\n data[BasicProfileFields.ID])\n self.failUnlessEqual(\n profile[BasicProfileFields.PUBLIC_PROFILE_URL],\n 
data[BasicProfileFields.PUBLIC_PROFILE_URL])\n self.failUnlessEqual(\n profile[BasicProfileFields.LOCATION],\n data[BasicProfileFields.LOCATION])\n self.failUnlessEqual(\n profile[BasicProfileFields.SITE_STANDARD_PROFILE_REQUEST],\n data[BasicProfileFields.SITE_STANDARD_PROFILE_REQUEST])\n self.failUnlessEqual(\n profile[BasicProfileFields.TWITTER_ACCOUNTS],\n data[BasicProfileFields.TWITTER_ACCOUNTS])",
"def get_top10_films_by_genre_name(current_user, genre_name):\r\n\r\n url = \"https://unogsng.p.rapidapi.com/search\"\r\n\r\n genre_id = str(get_genre_id_by_name(genre_name))\r\n genre_id = genre_id.replace('{','')\r\n genre_id = genre_id.replace('}','')\r\n\r\n parameter_list = {\"genrelist\": f\"{genre_id}\",\"orderby\":\"rating\",\r\n \"limit\":\"10\"} \r\n\r\n querystring = {}\r\n\r\n # Fill in the entries one by one if they have values\r\n for key in parameter_list:\r\n if parameter_list[key]:\r\n if parameter_list[key] != \"\":\r\n querystring[key] = parameter_list[key]\r\n\r\n headers = {\r\n 'x-rapidapi-key': \"\",\r\n 'x-rapidapi-host': \"unogsng.p.rapidapi.com\"\r\n }\r\n\r\n headers['x-rapidapi-key'] = os.environ.get('API_TOKEN_1') \r\n\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\r\n\r\n #take the response and unpack it into a workable format\r\n search_results = json.loads(response.text)\r\n search_results_values = search_results.values()\r\n\r\n #extract the embedded dictionary from 2 levels down in results\r\n try:\r\n listify_results = list(search_results_values)\r\n result_list = listify_results[2] \r\n\r\n except IndexError:\r\n return {\"error\": \"your search was too specific and returned no results. please try again.\"}\r\n \r\n\r\n #then wrap it back into a dictionary using index/result number as key\r\n recommendations = dict()\r\n\r\n for index, movie in enumerate(result_list):\r\n recommendations[index + 1] = movie\r\n\r\n # store results, qstr, and login_user in the query_history table\r\n add_query_to_query_history(current_user, str(querystring), \r\n str(recommendations), str(genre_id), None, None, \r\n None, None, None, None, None, None)\r\n\r\n return recommendations"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Mocks the function that retrieves the fetch time that should be used.
|
def mock_get_last_fetch_time(last_run, params):
last_fetch = last_run.get('latest_detection_found')
if not last_fetch:
# To handle the fact that we can't freeze the time and still parse relative time expressions such as 2 days
last_fetch = "2021-07-16T11:08:55.000Z"
return last_fetch
|
[
"def time_mock(mocker):\n\n TimeMock(mocker)",
"def test_get_time_tracking_entry(self):\n pass",
"def mock_time(self, t):\n utils._micros_since_epoch = lambda : t",
"def fetch_time(self) -> float:\n return self.navigation_timing.response_end - self.navigation_timing.fetch_start",
"def test_get_time_opt_def(self):\n self.dbgfunc()\n obj = CrawlConfig.CrawlConfig.dictor(self.sample)\n self.expected(388, obj.get_time('crawler', 'dumpling', 388))\n self.expected(47, obj.get_time('crawler', 'strawberry', 47))\n self.expected(17.324, obj.get_time('crawler', 'beeswax', 17.324))\n self.assertRaisesMsg(U.HpssicError,\n MSG.default_int_float,\n obj.get_time,\n 'crawler',\n 'fiddle',\n 'foobar')",
"def get_time(self): # TEST\n return self._game.get_time()",
"def test_get_timestamp(self, mock_get_ss_timestamp, mock_conn):\n mock_get_ss_timestamp.return_value = self.fake_timestamp\n actual = scanner._get_timestamp(self.FAKE_global_configs)\n self.assertEqual(1, mock_get_ss_timestamp.call_count)\n self.assertEqual(self.fake_timestamp, actual)",
"def test_user_tracked_times(self):\n pass",
"def test_user_current_tracked_times(self):\n pass",
"def test_cached_get(self):\n # mock up a sessions compatible error response, and pretend to have a\n # good test response cached.\n self.mock_sessions.post.return_value = DummyResponse(self.error_xml)\n self.cache.get.return_value = self.test_xml\n\n result, current, expires = self.api.get('foo/Bar', {'a':[1,2,3]})\n\n rowset = result.find('rowset')\n rows = rowset.findall('row')\n self.assertEqual(len(rows), 2)\n self.assertEqual(rows[0].attrib['foo'], 'bar')\n\n self.assertFalse(self.mock_sessions.post.called)\n # timestamp attempted to be extracted.\n self.assertEqual(self.api.last_timestamps, {\n 'current_time': 1255885531,\n 'cached_until': 1258563931,\n })\n self.assertEqual(current, 1255885531)\n self.assertEqual(expires, 1258563931)",
"def get_last_fetch_timestamp(last_fetch, time_method, fetch_time):\n if last_fetch:\n last_fetch_timestamp = last_fetch\n else:\n last_fetch, _ = parse_date_range(date_range=fetch_time, utc=False)\n # if timestamp: get the last fetch to the correct format of timestamp\n last_fetch_timestamp = int(last_fetch.timestamp() * 1000)\n if 'Timestamp - Seconds' in time_method:\n last_fetch_timestamp = last_fetch_timestamp // 1000\n return last_fetch_timestamp",
"def test_estimates_time_get(self):\n query_string = [('start_latitude', 1.2),\n ('start_longitude', 1.2),\n ('customer_uuid', 'customer_uuid_example'),\n ('product_id', 'product_id_example')]\n response = self.client.open(\n '/v1/estimates/time',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_cache_timeout(self):\n @cache.Cache(timeout=1)\n def return_value(value):\n return time.time()\n value=return_value(0)\n time.sleep(2)\n self.assertNotEqual(value, return_value(0))",
"def test_time():\n response = client.get(\"/api/time/moscow\")\n response_json = response.json()\n\n # time of the api\n response_datetime = datetime(response_json[\"year\"], response_json[\"month\"], response_json[\"day\"],\n response_json[\"hour\"], response_json[\"minute\"], response_json[\"seconds\"])\n\n tz = pytz.timezone('Europe/Moscow')\n # time of the server in Moscow timezone\n moscow_now = datetime.now(tz).replace(tzinfo=None)\n assert abs(response_datetime - moscow_now).total_seconds() <= 5",
"def test_get_hard_cache_miss(self):\n\n self.mock.get = Mock(return_value=None)\n\n value = self.cache.get(\"foo\", default=\"default\")\n self.assertEquals(\"default\", value)\n\n # Check the get method\n self.assertEquals(1, self.mock.get.call_count)\n self.mock.get.assert_called_with(\"foo\", version=None)",
"def test_benchmark_get(self):\n benchmark_config = resttest.Benchmark();\n benchmark_config.url = self.prefix + '/api/person/'\n benchmark_config.add_metric('total_time').add_metric('total_time','median')\n benchmark_result = resttest.run_benchmark(benchmark_config)\n print(\"Benchmark - median request time: \" + str(benchmark_result.aggregates[0]))\n self.assertTrue(benchmark_config.benchmark_runs, len(benchmark_result.results['total_time']))",
"def test_update_time_tracking_entry(self):\n pass",
"def testGetCachedTime(self):\n now = time.time()\n cache = twitter._FileCache()\n cache.Set(\"foo\", 'Hello World!')\n cached_time = cache.GetCachedTime(\"foo\")\n delta = cached_time - now\n self.assertTrue(delta <= 1,\n 'Cached time differs from clock time by more than 1 second.')\n cache.Remove(\"foo\")",
"def test_gettime(test_class):\n sys = test_class()\n sim = Sim()\n sim.add_system(sys)\n integrationlength = 2.0\n assert sim.get_time() == 0.0\n sim.simulate(integrationlength, 0.1)\n assert sim.get_time() == integrationlength"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets dps data/properties
|
def set(self, dps_data):
base_payload = OrderedDict([("devId", self.dev_id), ("dps", dps_data), ("t", int(time.time()))])
enc_payload = self.message.compose('set', base_payload)
return self.communicate(enc_payload)
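For reference, a minimal standalone sketch of the plaintext payload this method builds before self.message.compose('set', ...) encrypts it; the device id and dps keys below are hypothetical placeholders, not values taken from this excerpt.

import json
import time
from collections import OrderedDict

# Hypothetical identifiers: a real devId comes from the vendor app or a MITM capture.
dev_id = "0123456789abcdef0123"
dps_data = {"1": True, "3": 750}  # e.g. switch on, brightness value

# Same structure the set() method above composes before encryption.
base_payload = OrderedDict([("devId", dev_id), ("dps", dps_data), ("t", int(time.time()))])
print(json.dumps(base_payload))
# {"devId": "0123456789abcdef0123", "dps": {"1": true, "3": 750}, "t": <current unix time>}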
|
[
"def setMPxData(*args, **kwargs):\n \n pass",
"def setData(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def draw_data_property(self, dp):\n\n # draw the data property\n o = self.scene.mlab.points3d(dp.x, dp.y, dp.z, color=green, colormap=\"copper\", scale_factor=2, resolution=8)\n\n # get the subject of the property\n r = dp.resource\n \n # find coordinates for the edge and for its name\n u = numpy.linspace(r.x, dp.x, 10)\n v = numpy.linspace(r.y, dp.y, 10)\n w = numpy.linspace(r.z, dp.z,10) \n pred_x = numpy.mean(u)\n pred_y = numpy.mean(v) \n pred_z = numpy.mean(w)\n\n # draw the edge\n st = time.time()\n p = self.scene.mlab.plot3d(u, v, w, color=green, tube_radius=.25)\n et = time.time()\n logging.debug(\"Sphere drawn in %s ms\" % (round(et-st, 3) * 1000))\n \n # return\n return p, o",
"def set_survey_properties(self,iSurveyID,aSurveyData):",
"def SetPointData(self, *args):\n return _itkPointSetPython.itkPointSetD2S_SetPointData(self, *args)",
"def SetPointData(self, *args):\n return _itkPointSetPython.itkPointSetPD33S_SetPointData(self, *args)",
"def set_group_properties(self,iGroupID,aGroupData):",
"def setData(self, data: DataDictBase):\n self.data = data\n if self.plotWidget is not None:\n self.plotWidget.setData(self.data)",
"def __set_data(self, data):\n ent = self.__entity_ref()\n self.set_state_data(ent, data)",
"def __setstate__(self, d):\n self.initialize()\n for (key, value) in d.items():\n setattr(self, key, value)\n #Ok, now fix everything\n # self.inst should be good though, its own setstate does it.\n self.initialize_reflections()\n self.recalculate_reflections(None, calculation_callback=None)\n self.initialize_volume_symmetry_map()\n self.calculate_coverage(None, None)",
"def setPanelData(self, tag):\n displayPanel = gv.iChartPanel0 if tag == 'M' else gv.iChartPanel1\n dataList = self.modelD if tag == 'M' else self.dataD\n displayPanel.clearData() # call the clearData to clear the panel record.\n for idx, dataSet in enumerate(dataList):\n for num in random.sample(dataSet, len(dataSet)*self.sampleRate//100):\n if num//1000 >= SAMPLE_COUNT: continue # filter the too big data.\n displayPanel.dataD[idx][num//1000] += 1\n displayPanel.dataD[idx][1] = displayPanel.dataD[idx][0]\n displayPanel.dataD[idx][0] = 0\n displayPanel.dataD[idx][-1] = 0 \n # temperary for compare mode active. \n if tag == 'M' and gv.iChartPanel0.compareOverlay:\n displayPanel.dataD[-1] = gv.iChartPanel1.dataD[0]",
"def __init__(self, props, data):\n name = 'KangPolvani'\n super(STJKangPolvani, self).__init__(name=name, props=props, data=data)\n self.wh_200 = 20000.0 / self.data.cfg[\"pfac\"]\n self.wh_1000 = 100000.0 / self.data.cfg[\"pfac\"]",
"def _set_instance_variables(self, data):\n self.start_configs = data['start_configs']\n self.waypt_configs = data['waypt_configs']\n self.start_speeds = data['start_speeds']\n self.spline_trajectories = data['spline_trajectories']\n self.horizons = data['horizons']\n self.lqr_trajectories = data['lqr_trajectories']\n self.K_nkfd = data['K_nkfd']\n self.k_nkf1 = data['k_nkf1']\n \n # Initialize variable tensor for waypoints in world coordinates\n dt = self.params.system_dynamics_params.dt\n self.waypt_configs_world = [SystemConfig(\n dt=dt, n=config.n, k=1, variable=True,\n track_trajectory_acceleration=self.params.track_trajectory_acceleration) for config in data['start_configs']]\n\n self.instance_variables_loaded = True\n\n if self.params.verbose:\n N = self.params.waypoint_params.n\n for v0, start_config in zip(self.start_velocities, self.start_configs):\n print('Velocity: {:.3f}, {:.3f}% of goals kept({:d}).'.format(v0, 100.*start_config.n/N,\n start_config.n))",
"def set_setup_ds_commnad(self):\n self.setup_ds_cmd = [\"setup-ds.pl --silent \\\\\"]\n self.setup_ds_cmd.append(\"General.FullMachineName =\" + self.hostname + \" \\\\\")\n self.setup_ds_cmd.append(\"General.SuiteSpotUserID =\" + self.user + \" \\\\\")\n self.setup_ds_cmd.append(\"General.SuiteSpotUserID =\" + self.user + \" \\\\\")\n self.setup_ds_cmd.append(\"General.SuiteSpotGroup =\" + self.group + \" \\\\\")\n self.setup_ds_cmd.append(\"slapd.ServerPort =\" + self.ds_port + \" \\\\\")\n self.setup_ds_cmd.append(\"slapd.ServerIdentifier = ca \\\\\")\n self.setup_ds_cmd.append(\"slapd.Suffix =\" + self.suffix +\" \\\\\")\n self.setup_ds_cmd.append(self.root_dn + \" \\\\\")\n self.setup_ds_cmd.append(\"slapd.RootDNPwd = \" + self.root_pwd)\n\n print(self.setup_ds_cmd)\n \n self.ds_service_enable_cmd = \"systemctl enable dirsrv.target\"\n self.ds_service_start_cmd = \"systemctl start dirsrv.target\"",
"def process_property(self, data):\r\n if data.arguments['p'] == 'title':\r\n self.title.content = data.arguments['value']\r\n self.title.by = data.arguments['by']\r\n self.title.ts = data.arguments['ts']\r\n \r\n if data.arguments['p'] == 'topic':\r\n self.topic.content = data.arguments['value']\r\n self.topic.by = data.arguments['by']\r\n self.topic.ts = data.arguments['ts']\r\n \r\n if data.arguments['p'] == 'privclasses':\r\n self.pc = Packet(data.arguments['value'], ':').args\r\n self.pc_order = sorted(self.pc.keys(), key=int)\r\n self.pc_order.reverse()\r\n \r\n if data.arguments['p'] == 'members':\r\n member = Packet(data.arguments['value'])\r\n while member.cmd != None and len(member.args) > 0:\r\n self.register_user(member)\r\n member = Packet(member.body)",
"def configure_ddp(self):\n self.pre_configure_ddp()\n self._model = DistributedDataParallel(\n LightningDistributedModule(self.model),\n **self._ddp_kwargs,\n )\n self._register_ddp_hooks()",
"def setup_data(self, domain=None):\n\t\tself.data = frappe.get_domain_data(self.name)",
"def set_parameters(parameters):\n if parameters:\n for p_name, p_value in parameters.items():\n setattr(DataHelper, p_name, p_value)",
"def __setstate__(self, d):\n d = param_aliases(d)\n try:\n load_options = Store.load_counter_offset is not None\n if load_options:\n matches = [k for k in d if k.startswith('_custom_option')]\n for match in matches:\n custom_id = int(match.split('_')[-1])\n if not isinstance(d[match], dict):\n # Backward compatibility before multiple backends\n backend_info = {'matplotlib':d[match]}\n else:\n backend_info = d[match]\n for backend, info in backend_info.items():\n if backend not in Store._custom_options:\n Store._custom_options[backend] = {}\n Store._custom_options[backend][Store.load_counter_offset + custom_id] = info\n\n d.pop(match)\n\n if d['id'] is not None:\n d['id'] += Store.load_counter_offset\n else:\n d['id'] = None\n except:\n self.warning(\"Could not unpickle custom style information.\")\n self.__dict__.update(d)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set LED brightness to X%
|
def set_brightness(self, percentage):
try:
percentage = round(percentage, 1)
except TypeError:
raise ValueError("percentage must be numeric (integer, float)")
percentage = max(1, percentage) * 10
return self.set(OrderedDict([(DPS.POWER, True), (DPS.BRIGHT, percentage)]))
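A standalone sketch of the conversion applied above before the value is written to DPS.BRIGHT: the percentage is rounded, clamped to at least 1, and scaled by 10, which implies a device-side brightness range of roughly 10-1000 (an inference from the code, not something stated in this excerpt).

def to_dps_brightness(percentage):
    # Mirrors set_brightness: round to one decimal, clamp to >= 1 %, scale by 10.
    percentage = round(percentage, 1)
    return max(1, percentage) * 10

print(to_dps_brightness(75))    # 750
print(to_dps_brightness(0.4))   # 10   (sub-1 % values are clamped up to 1 %)
print(to_dps_brightness(100))   # 1000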
|
[
"def setBrightness(self, value = 0):\n\t\tgrovepi.fourDigit_brightness(self.display, value)",
"def set_backlight(val):\n val = max(0, min(1.0, val))\n board.DISPLAY.auto_brightness = False\n board.DISPLAY.brightness = val",
"def update_led(self):\n if self.pwm < 300:\n self.set_led_function([255, 0, 0], \"legs\", \"\", \"\")\n else:\n percentage = self.pwm / 4095\n blue = 255 * percentage\n self.set_led_function([0, 0, blue], \"legs\", \"\", \"all\")",
"def set_brightness(self, value=25):\n # byte2: 0x02 to 0x1B (2-27)\n\n value = max(0, min(25, value))\n value += 2\n\n self.on()\n LOGGER.info('Swiching light brightness to {1} in group {0}'.format(self._group, value))\n MiLightCommand(self.BRIGHTNESS, bytes[(value)]).send()",
"def on(self):\n self.set_brightness(100)",
"def set_brightness(self, brightness):\n self._api.set_brightness(brightness)\n self.update()",
"def adjust_brightness(self, brightness):\n self.pixels = neopixel.NeoPixel(board.D18, self.LED_COUNT, brightness=brightness)",
"async def brightness_set(self, ctx, brightness: int = 254, *, name=None):\n if not await self.get_bridge():\n await ctx.send(\"No IP has been set.\")\n return\n brightness = await self.max_min_check(brightness, 254, 0)\n for light in self.lights:\n if name is None or light.name.lower() == name.lower() and light.on:\n light.brightness = brightness",
"def increment_brightness(self, amount=0.0):\n a = amount if amount is not 0.0 else self.bi\n if self.bi+a > 1.0:\n self.bi = 1.0\n elif self.bi < 0.0:\n self.bi = 0\n else:\n self.bi += a\n\n self.draw()",
"def camChangeBrightness(self, value):\n # Because QSliders does not work with decimal values, we use percent\n # instead. So we need to get a float between 0 and 1 then we set the\n # value.\n # And because the range is from -50% to +50%, we add 50 to get a positive\n # value.\n self.gui.video.device.setBrightness((value + 50.0) / 100.0)",
"def set_brightness(self, level):\r\n if level <= 0:\r\n self._change_color(**self.offcolor)\r\n # LED must be on or else it cannot get brighter\r\n elif self.state or level < self.current['brightness']:\r\n if level >= 1:\r\n self._change_color(**self.oncolor)\r\n else:\r\n # color luminosity goes from on.color's 1/4 to full luminosity -> 0.25*hls[1] to hsl[1]\r\n lum = self.oncolor_hls[1] * (0.75 * level + 0.25)\r\n if lum < 0.2 * self.bulbcolor_hls[1]:\r\n color = self.offcolor['led']\r\n reflect = self.offcolor['reflection']\r\n else:\r\n color = \"#%04x%04x%04x\" % tuple(\r\n [int(round(i * 65535, 0)) for i in hls_to_rgb(self.oncolor_hls[0], lum, self.oncolor_hls[2])])\r\n\r\n # reflector luminosity goes from on.color's 1/2 luminosity to 1 (ie white) -> 0.5*hls[1] to 1\r\n hue = self.oncolor_hls[0]\r\n if self.usemonotonereflection:\r\n sat = 0\r\n targetlum = self.bulbcolor_hls[1]\r\n else:\r\n sat = self.oncolor_hls[2]\r\n targetlum = self.oncolor_hls[1]\r\n if self.usereflectquadraticstep:\r\n power = 2\r\n else:\r\n power = 1\r\n lum = (1 - 0.5 * targetlum) * level ** power + 0.5 * targetlum\r\n reflect = \"#%04x%04x%04x\" % tuple([int(round(i * 65535, 0)) for i in hls_to_rgb(hue, lum, sat)])\r\n\r\n self._change_color(led=color, reflection=reflect, brightness=level)",
"def hass_to_myhomeserver_brightness(value: int):\n return int((value / 255.0) * 100)",
"def set_led(jPin, brightness):\n j_pins[jPin-1].write_analog(brightness)",
"def set_brightness(self, brightness: float):\n raise NotImplementedError",
"def grid_brightness(self, val: float):\n assert 0.0 <= val <= 1.0\n self.visa_write(f':GBR {int(val * 100)}')",
"def setColorIntensity(self, intensity):\n self.listener.sendData(\"%s %d\" %( BluetoothLampCommand.ACTION_SET_COLOR_INTENSITY, intensity))",
"def set_rgb_led(red, gre, blu):\n\n red = red * 4\n blu = blu * 4\n gre = gre * 4\n\n for value in [red, gre, blu]:\n\n if value > 1023:\n value = 1023\n \n if value < 0:\n value = 0\n\n RED_LED_PWM.duty(red)\n GRE_LED_PWM.duty(gre)\n BLU_LED_PWM.duty(blu)",
"def brighten(val, minval):\n return minval + (255 - minval) * val // 255",
"def adjust_backlight(display):\n current_time = time.localtime()\n if current_time.tm_hour >= BACKLIGHT_DIMMING_START or \\\n current_time.tm_hour < BACKLIGHT_DIMMING_END:\n display.brightness = BACKLIGHT_DIMMING_VALUE\n else:\n display.brightness = BACKLIGHT_DEFAULT_VALUE"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find Tuya-compatible devices using the gid/devid, which can be found in the apps for smart devices. Use MITM to find the localkey by capturing the packets from the smart home/LED app.
|
def find_devices(gid=None, key=''):
import concurrent.futures
def checkport(port):
sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(('', port))
sock.settimeout(10)
cipher = SyskaCipher()
start = time.time()
while True:
try:
data_json = json.loads(cipher.udp_decrypt(sock.recv(1024)))
except timeout:
break
print(json.dumps(data_json, indent=4), "\n\n")
if gid == data_json.get('gwId', -1):
return SyskaLed(gid, key, data_json.get('ip'))
if time.time() - start > 11:
break
return None
with concurrent.futures.ThreadPoolExecutor() as executor:
f1 = executor.submit(checkport, 6666)
f2 = executor.submit(checkport, 6667)
return f1.result() or f2.result()
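A hedged end-to-end usage sketch of the discovery flow above: the gwId is taken from the vendor app and the local key from a MITM capture of the app's traffic, as the description suggests, and the values below are placeholders. The call listens for UDP broadcasts on ports 6666 and 6667 and blocks for up to roughly ten seconds before giving up; it also assumes the returned object is the same device class whose set/set_brightness methods appear earlier in this excerpt.

# Placeholder credentials; substitute the real gwId and localkey.
led = find_devices(gid="0123456789abcdef0123", key="16charlocalkey0")
if led is None:
    print("no matching device announced itself on UDP 6666/6667")
else:
    led.set_brightness(75)  # uses the percent-to-DPS conversion shown above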
|
[
"def find_devices(controller):\n pysicl.gpib_timeout(500)\n for addr in range(1,31):\n print addr\n if addr != 21:\n status = dev_status(controller+str(addr))\n print addr,status\n if status > -1:\n print addr,\":\",status\n pysicl.gpib_timeout(10000)",
"def find_megaraid_devices():\n has_proc_name = (item for item in os.walk('/sys/devices/') if 'proc_name' in item[2])\n for item in has_proc_name:\n fn = item[0] + '/proc_name'\n with open(fn, 'r') as fd:\n driver = fd.read().strip()\n if driver == 'megaraid_sas':\n yield item[0]",
"def find():\n paths = glob.glob('/sys/class/hidraw/*/device/uevent')\n devices = []\n for path in paths:\n with open(path, 'r') as dev:\n for line in dev:\n if HID_ID.match(line):\n chunks = path.split('/')\n devices.append(chunks[4])\n return [PlasmaTrim(\"/dev/%s\" % dev) for dev in devices]",
"def discover_atag():\r\n # return format: [b'ONE xxxx-xxxx-xxxx_xx-xx-xxx-xxx (ST)',\r\n # ('xxx.xxx.x.x', xxxx)]\r\n # sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) # UDP\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n sock.settimeout(30)\r\n sock.bind((\"\", 11000))\r\n try:\r\n while True:\r\n result = sock.recvfrom(37)\r\n host_ip = result[1][0]\r\n device_id = result[0].decode().split()[1]\r\n return host_ip, device_id\r\n except socket.timeout:\r\n return False\r\n except Exception as err:\r\n raise RequestError(err)",
"def get_devices():\n print(\"Scanning for available devices.\")\n nearby_devices = bluetooth.discover_devices()\n out = {}\n for bdaddr in nearby_devices:\n name = bluetooth.lookup_name(bdaddr)\n out[name] = bdaddr\n if out is not None:\n print(\"Found the following devices:\")\n print_devices(out)\n print(\"\")\n else:\n print(\"Found no devices.\")\n return(out)",
"def daqfind():\r\n\r\n \"\"\"for i in range(0,128):\r\n daq = 'Dev' + str(i)\r\n\r\n sys.stdout.write('Dev')\r\n print i, '-', cat[str(category[0])], 'found(sw)'\r\n if (serialnumber == 0):\r\n sys.stdout.write('Dev')\r\n print i, '-', cat[str(category[0])], 'found(hw) - Disconnected'\r\n else:\r\n sys.stdout.write('Dev')\r\n print i, '-', cat[str(category[0])], 'found(hw)'\r\n \"\"\"\r\n sound = pyaudio.PyAudio()\r\n host_api = sound.get_default_host_api_info()\r\n inputs = sound.get_default_input_device_info()\r\n outputs = sound.get_default_output_device_info()\r\n \r\n #print host_api\r\n #print inputs\r\n #print outputs\r\n\r\n\r\n \r\n dev_info = []\r\n handles = []\r\n\r\n for i in range(0,sound.get_host_api_count()):\r\n handles.append(i)\r\n\r\n #print '%(id)-8s%(ai)-8s%(ao)-8s%(di)-8s%(do)-8s%(ci)-8s%(co)-8s'% dev_info[nd]\r\n return handles\r\n #print sound.get_default_input_device_info()\r\n #print sound.get_device_count()\r\n #print sound.get_device_info_by_index(7)\r\n #sound.get_host_api_count()\r\n #sound.get_host_api_info_by_index(0)\r\n #sound.get_default_host_api_info()\r\n #sound.get_default_input_device_info()\r\n #sound.get_default_output_device_info()\r",
"def _matched_devices_by_uuid(self, search_target: str) -> List[UpnpDevice]:\n return [\n device\n for device in self.device.all_devices\n if device.udn.lower() == search_target\n ]",
"def list_devices():\n devs = find_all_vkb()\n if not devs:\n print(\"No VKB devices found\")\n return\n\n for i, dev in enumerate(devs):\n print(f\" {i:>2}: {dev.name} ({dev.guid})\")",
"def getDevices():\n\n # Create a list\n suitable_devices = []\n\n # Iterate over each device\n for i in range(p.get_device_count()):\n # Extract device metadata (note: there is more available)\n d = {}\n d['name'] = p.get_device_info_by_index(i)['name'].split(':')[0]\n d['dev_index'] = i\n d['rate'] = int(p.get_device_info_by_index(i)['defaultSampleRate'])\n d['channels'] = p.get_device_info_by_index(i)['maxInputChannels']\n\n # Check if device has input channels and isn't the default fake device\n if (d['channels'] >= 1 and d['name'] != 'default'):\n # Save it to list\n suitable_devices.append(d)\n\n return suitable_devices",
"def list_optomux_devices(self):\n devices = []\n for address in range(256):\n msg = 'checking address {:02X}'.format(address)\n print(msg,end='',flush=True)\n print(chr(8)*len(msg),end='',flush=True)\n rtn = self.power_up_clear(address)\n if rtn[0] == 'A':\n rtn = self.identify_optomux_type(address)\n if rtn[0] == 'A':\n print('Found {:s} device at address {:02X}'\\\n .format(self.optomux_type[int(rtn[1])],address))\n devices.append(address)\n print('\\nDone')\n return devices",
"def _get_devices(self):\n with self.lock:\n self.cc_audios, self.cc_groups = CcAudioStreamer.get_devices()\n\n # sort the lists alphabetically by name\n self.cc_audios.sort(key=lambda x: x.name)\n self.cc_groups.sort(key=lambda x: x.name)\n\n # current mapping scheme has a limit of 10 devices and groups\n MAX_LIMIT = 10\n assert len(self.cc_audios) + len(self.cc_groups) <= MAX_LIMIT, \"Update code to handle more than 10 CCA devices and groups\"\n\n # NOTE: this code will fail for more than 10 devices+groups\n keys = [str((i+1)%10) for i in range(10)] # ['1', ..., '9', '0']\n self.cc_key_mapping = dict(zip(keys, self.cc_audios))\n self.cc_key_mapping.update(dict(zip(reversed(keys), self.cc_groups)))\n\n #print(\"LEN\", len(self.cc_key_mapping))\n #print(self.cc_key_mapping)",
"def query_devices():\n return devices.find()",
"def scan_devices(): # {\n logger.info(\"IN scan_devices\")\n\n devices_dict = thePlayer.scan_devices()\n devices_list = [\"%s,%s\" % (k, cc.name) for k, cc in devices_dict.items()]\n try:\n devices = \"\\n\".join(devices_list)\n except TypeError:\n devices = \"\\n\".join([\"??\"]*7)\n bdevices = devices.encode()\n self.send_header(\"Content-Length\", str(len(bdevices)))\n self.end_headers()\n self.wfile.write(bdevices)\n self.wfile.flush()",
"def find_device_in_ipam(ip, devices, logger):\n logger.debug('%s - Getting the device from the devices of NSoT.', ip)\n for device in devices:\n if 'attributes' in device:\n if 'address' in device['attributes']:\n if device['attributes']['address'] == ip:\n return device",
"def list_devices():\n out = subprocess.check_output([\"colormgr\", \"get-devices-by-kind\", \"display\"])\n for line in out.decode(\"utf8\").split(\"\\n\"):\n if line.startswith(\"Model:\"):\n print(line.split(\":\")[1].lstrip())",
"def list() -> None:\n devs = nkfido2.find_all()\n local_print(\":: 'Nitrokey FIDO2' keys\")\n for c in devs:\n assert isinstance(c.dev, CtapHidDevice)\n descr = c.dev.descriptor\n\n if hasattr(descr, \"product_name\"):\n name = descr.product_name\n elif c.is_bootloader():\n name = \"FIDO2 Bootloader device\"\n else:\n name = \"FIDO2 device\"\n\n if hasattr(descr, \"serial_number\"):\n id_ = descr.serial_number\n else:\n assert isinstance(descr.path, str)\n id_ = descr.path\n\n local_print(f\"{id_}: {name}\")",
"def ltm_discover(self, config, devid):\n iq = self.config['bigiq']\n ip = config['bigip']\n username = config['ip_username']\n password = config['ip_password']\n iq_username = config['iq_username']\n iq_password = config['iq_password']\n\tself.logger.info(\"Discover BIGIP {0} in Device\".format(ip))\n\n uri= 'https://' + iq + '/mgmt/cm/global/tasks/device-discovery'\n link = 'https://localhost/mgmt/cm/system/machineid-resolver/{0}'.format(devid)\n\n device_json = {'deviceReference': {\"link\": link}, 'moduleList': [{'module': 'adc_core'}], \"status\":\"STARTED\"}\n\n result=0\n response = requests.post(uri, data=str(device_json), auth=(iq_username, iq_password), verify=False)\n\tjson_str = response.json()\n\n uri=json_str['selfLink'].replace('localhost', iq)\n i=0\n while True:\n response = requests.get(uri, auth=(config['iq_username'], config['iq_password']), verify=False)\n\t json_str = response.json()\n\n if json_str['status'] == 'FINISHED':\n result=1\n break\n elif json_str['status'] == 'FAILED':\n result=0\n break\n else:\n time.sleep(1)\n i+=1\n self.logger.info(\"Discovery Status = {0} expecting FINISHED. {1}\".format(json_str['status'], i))\n\n\n if result==1:\n return True\n else:\n return False",
"def find_usb_serial_devices():\n devicelist=[]\n\n if \"posix\" in os.name:\n # first, look for ttyUSB devices, and filter those supported\n devices=sorted(glob.glob(\"/dev/ttyUSB*\"));\n for device in devices:\n devicelist.append((device,__identify_usb_serial_device(device)))\n # next, look for ttyACM devices, and accept unfiltered (mbed)\n devices=sorted(glob.glob(\"/dev/ttyACM*\"));\n for device in devices:\n devicelist.append((device,\"ttyacm\"))\n return devicelist",
"def get_test_device():\n\n devices = []\n for node_name,node in LOCAL[\"node\"].iteritems():\n device = node[\"device\"]\n if device not in devices: devices.append(device)\n return devices"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize this readwrite lock.
|
def __init__(self, lock=None):
# Condition variable, used to signal waiters of a change in object
# state.
if lock is None:
self.__condition = Condition(Lock())
else:
self.__condition = Condition(lock)
# Initialize with no writers.
self.__writer = None
self.__upgradewritercount = 0
self.__pendingwriters = []
# Initialize with no readers.
self.__readers = {}
|
[
"def acquire_read(self):\n with self.monitor:\n if self.rwlock == -1 and self.writer == threading.currentThread():\n #We already have a write lock - we don't acquire try to acquire\n # a read lock.- we increment the number of write locks.\n self.wcount +=1 \n else:\n return self._acquire_read()",
"def __init__(self):\n this = _coin.new_SoLockManager()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self):\n # Latch to prevent race conditions.\n self.__lock = threading.Condition()\n\n # Dictionary that works as a set of queues and maps objects to\n # threads that acquired their locks or are willing to acquire\n # them.\n self.__objects = {}\n\n # Dictionary that maps a procedure to a 3-tuple that contains\n # the objects that the procedure needs to lock, the thread's id\n # of the caller which acquired the locks and a condition\n # variable if there is any.\n self.__procedures = {}\n\n # List with procedures that acquire all the necessary locks and\n # can be executed.\n self.__free = []",
"def readLock(self) -> \"int\":\n return _coin.SbRWMutex_readLock(self)",
"def __init__(self, obj):\r\n if not _LOCKFUNCS:\r\n _cache_lockfuncs()\r\n self.obj = obj\r\n self.locks = {}\r\n self.reset()",
"def tryReadLock(self) -> \"int\":\n return _coin.SbRWMutex_tryReadLock(self)",
"def __init__(self):\n this = _coin.new_SbMutex()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __enter__(self):\n self._rpc_lock()\n old_mask = os.umask(0o077)\n try:\n trial_count = 0\n while self._fid is None and trial_count < 2:\n if os.path.exists(self._LOCK_PATH):\n # Rename existing file if it is not secure\n is_secure_path(self._LOCK_PATH)\n self._fid = open(self._LOCK_PATH, 'a+')\n if not is_secure_file(self._LOCK_PATH, self._fid):\n # File is insecure and was renamed, try again\n self._fid.close()\n self._fid = None\n trial_count += 1\n finally:\n os.umask(old_mask)\n if self._fid == None:\n self._rpc_unlock()\n raise RuntimeError('Unable to open write lock securely after two tries')\n # Advisory lock protects against simultaneous multi-process\n # modifications to the file, although we expect only one geopmd\n # process using this class.\n fcntl.lockf(self._fid, fcntl.LOCK_EX)\n self._fid.seek(0)\n return self",
"def __init__(self):\n this = _coin.new_SbThreadMutex()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, **kwargs):\r\n if kwargs:\r\n _init_command(self, **kwargs)\r\n self.lockhandler = LockHandler(self)",
"def __init__(self, *args):\n this = _coin.new_SbThreadAutoLock(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def _read(self):\n\n if self._fid is None:\n raise RuntimeError('The WriteLock object must be used within a context manager')\n contents = self._fid.readline(64)\n self._fid.seek(0)\n return contents",
"def acquire_write(self):\n with self.monitor:\n if self.tlocal.rcount > 0:\n return self._promote()\n else:\n return self._acquire_write()",
"def writeLock(self) -> \"int\":\n return _coin.SbRWMutex_writeLock(self)",
"def __init__(self, *args):\n _ida_pro.qmutex_locker_t_swiginit(self, _ida_pro.new_qmutex_locker_t(*args))",
"def _promote(self):\n #Release all our read locks...\n self.rwlock -= self.tlocal.rcount\n while self.rwlock != 0:\n self.writers_waiting += 1\n self.writers_ok.wait()\n self.writers_waiting -= 1\n self.writer = threading.currentThread()\n self.rwlock = -1\n #Convert count of read locks to count of write locks, \n # this converts allour held read lock to write, and adds one for our new lock!\n self.wcount = self.tlocal.rcount + 1\n self.tlocal.rcount = 0",
"def lock_write(self):\n token = self._call(\"lockWrite\")\n token = IToken(token)\n return token",
"def init(self,):\r\n self.random_seed_ = self.random_state\r\n self.random_state_ = check_random_state(self.random_seed_)\r\n return self",
"def __init__(self, *args, **kwds):\n self.multitonKey = None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Acquire a read lock for the current thread, waiting at most timeout seconds or doing a nonblocking check in case timeout is <= 0. In case timeout is None, the call to acquireRead blocks until the lock request can be serviced. In case the timeout expires before the lock could be serviced, a RuntimeError is thrown.
|
def acquireRead(self, blocking=True, timeout=None):
if not blocking:
endtime = -1
elif timeout is not None:
endtime = time() + timeout
else:
endtime = None
me = current_thread()
self.__condition.acquire()
try:
if self.__writer is me:
# If we are the writer, grant a new read lock, always.
self.__writercount += 1
return
while True:
if self.__writer is None:
# Only test anything if there is no current writer.
if self.__upgradewritercount or self.__pendingwriters:
if me in self.__readers:
# Only grant a read lock if we already have one
# in case writers are waiting for their turn.
# This means that writers can't easily get starved
# (but see below, readers can).
self.__readers[me] += 1
return
# No, we aren't a reader (yet), wait for our turn.
else:
# Grant a new read lock, always, in case there are
# no pending writers (and no writer).
self.__readers[me] = self.__readers.get(me, 0) + 1
return
if timeout is not None:
remaining = endtime - time()
if remaining <= 0:
# Timeout has expired, signal caller of this.
raise RuntimeError("Acquiring read lock timed out")
self.__condition.wait(remaining)
else:
self.__condition.wait()
finally:
self.__condition.release()
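A minimal usage sketch of the read-side API above. ReadWriteLock is an assumed class name (the excerpt only shows the constructor and this method), and the matching release call that decrements the per-thread reader count is not part of the excerpt, so it is only mentioned in a comment.

rwlock = ReadWriteLock()  # assumed class name

# Blocking acquisition: waits until no writer is active, then records the
# calling thread in the readers map. A second call from the same thread is
# reentrant and simply increments that thread's count.
rwlock.acquireRead()
rwlock.acquireRead()

# Timed acquisition: waits at most 0.5 s and raises RuntimeError if the lock
# cannot be granted before the deadline (e.g. a writer is active or queued).
try:
    rwlock.acquireRead(timeout=0.5)
except RuntimeError:
    print("read lock not granted within 0.5 s")

# Each successful acquire is expected to be paired with a release call,
# which is not shown in this excerpt.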
|
[
"async def acquire(self, blocking=None, blocking_timeout=None):\n sleep = self.sleep\n token = b(uuid.uuid1().hex)\n if blocking is None:\n blocking = self.blocking\n if blocking_timeout is None:\n blocking_timeout = self.blocking_timeout\n blocking_timeout = blocking_timeout or self.timeout\n stop_trying_at = mod_time.time() + min(blocking_timeout, self.timeout)\n\n while True:\n if await self.do_acquire(token):\n lock_acquired_at = mod_time.time()\n if await self.check_lock_in_slaves(token):\n check_finished_at = mod_time.time()\n # if time expends on acquiring lock is greater than given time\n # the lock should be released manually\n if check_finished_at > stop_trying_at:\n await self.do_release(token)\n return False\n self.local.token = token\n # validity time is considered to be the\n # initial validity time minus the time elapsed during check\n await self.do_extend(lock_acquired_at - check_finished_at)\n return True\n else:\n await self.do_release(token)\n return False\n if not blocking or mod_time.time() > stop_trying_at:\n return False\n await asyncio.sleep(sleep, loop=self.redis.connection_pool.loop)",
"def acquire(self, timeout=None):\n if timeout is None:\n # Wait forever (INFINITE)\n timeout = 0xFFFFFFFF\n else:\n timeout = int(round(timeout * 1000))\n ret = _WaitForSingleObject(self.handle, timeout)\n if ret in (0, 0x80):\n # Note that this doesn't distinguish between normally acquired (0) and\n # acquired due to another owning process terminating without releasing (0x80)\n self.acquired = True\n return True\n elif ret == 0x102:\n # Timeout\n self.acquired = False\n return False\n else:\n # Waiting failed\n raise ctypes.WinError()",
"def tryReadLock(self) -> \"int\":\n return _coin.SbRWMutex_tryReadLock(self)",
"def acquire(self):\n assert not self.has_lock\n\n wait_reporter = p4gf_log.LongWaitReporter(\"accessing p4key-lock\", LOG)\n while True:\n if self.do_acquire():\n self.has_lock = True\n LOG.debug2(\"lock-acquired %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-acquired stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n return self\n\n # lock held by others, attempt to remove stale owners\n if self.remove_stale_owners():\n continue\n\n # non-blocking case can only raise\n if not self.blocking:\n LOG.debug2(\"lock-busy %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-busy stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n raise LockBusy(self)\n\n wait_reporter.been_waiting()\n # just wait until lock can be acquired, either due to release or transfer death\n LOG.debug2(\"lock-waiting %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-waiting stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n time.sleep(_RETRY_PERIOD)",
"def acquire_read(self):\n with self.monitor:\n if self.rwlock == -1 and self.writer == threading.currentThread():\n #We already have a write lock - we don't acquire try to acquire\n # a read lock.- we increment the number of write locks.\n self.wcount +=1 \n else:\n return self._acquire_read()",
"def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n first.acquire(\"A\")\n with pytest.raises(Timeout):\n with lock(BASE_URL, \"A\", timeout=timedelta(seconds=1)):\n pass",
"def read(self, timeout_sec=None):\n # try:\n # return self._queue.get(timeout=timeout_sec)\n # except Queue.Empty:\n # # Timeout exceeded, return None to signify no data received.\n # return None",
"def wait(self, timeout=None, *throw_args):\n if self.value is not _NOT_USED:\n if self._exc is None:\n return self.value\n else:\n api.getcurrent().throw(*self._exc)\n if timeout is not None:\n timer = api.timeout(timeout, *throw_args)\n timer.__enter__()\n if timeout==0:\n if timer.__exit__(None, None, None):\n return\n else:\n try:\n api.getcurrent().throw(*timer.throw_args)\n except:\n if not timer.__exit__(*sys.exc_info()):\n raise\n return\n EXC = True\n try:\n try:\n waiter = Waiter()\n self.link(waiter)\n try:\n return waiter.wait()\n finally:\n self.unlink(waiter)\n except:\n EXC = False\n if timeout is None or not timer.__exit__(*sys.exc_info()):\n raise\n finally:\n if timeout is not None and EXC:\n timer.__exit__(None, None, None)",
"def read_lock_access(self):\n logger.debug(\"Getting read access for the grype_db lock\")\n read_lock = self._grype_db_lock.gen_rlock()\n\n try:\n yield read_lock.acquire(\n blocking=False, timeout=self.LOCK_READ_ACCESS_TIMEOUT\n )\n except Exception as exception:\n raise exception\n finally:\n logger.debug(\"Releasing read access for the grype_db lock\")\n read_lock.release()",
"def Exclusive(self, blocking=False, timeout=None):\n self._flock(fcntl.LOCK_EX, blocking, timeout,\n \"Failed to lock %s in exclusive mode\" % self.filename)",
"def test_read_lock_acquired(self) -> None:\n # First to acquire this lock, so it should complete\n lock = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=False)\n )\n assert lock is not None\n\n # Enter the context manager\n self.get_success(lock.__aenter__())\n\n # Attempting to acquire the write lock fails\n lock2 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n self.assertIsNone(lock2)\n\n # Attempting to acquire a read lock succeeds\n lock3 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=False)\n )\n assert lock3 is not None\n self.get_success(lock3.__aenter__())\n\n # Calling `is_still_valid` reports true.\n self.assertTrue(self.get_success(lock.is_still_valid()))\n\n # Drop the first lock\n self.get_success(lock.__aexit__(None, None, None))\n\n # Attempting to acquire the write lock still fails, as lock3 is still\n # active.\n lock4 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n self.assertIsNone(lock4)\n\n # Drop the still open third lock\n self.get_success(lock3.__aexit__(None, None, None))\n\n # We can now acquire the lock again.\n lock5 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n assert lock5 is not None\n self.get_success(lock5.__aenter__())\n self.get_success(lock5.__aexit__(None, None, None))",
"def wait(self, timeout=None):\n if hasattr(self, '_result'):\n return\n try:\n self.get(timeout)\n except Exception:\n pass",
"def needs_read_lock(func):\n def locked(self, *args, **kwargs):\n self.lock_for_read()\n try:\n return func(self, *args, **kwargs)\n finally:\n self.unlock()\n locked.__doc__ = func.__doc__\n locked.__name__ = func.__name__\n return locked",
"def test_read_timeout(self):\n\n\n # Test write\n self.session.execute(\"INSERT INTO test (k, v) VALUES (1, 1)\")\n\n # Assert read\n query = SimpleStatement(\"SELECT * FROM test WHERE k=1\", consistency_level=ConsistencyLevel.ALL)\n results = execute_until_pass(self.session, query)\n self.assertTrue(results)\n\n # Pause node so it shows as unreachable to coordinator\n get_node(1).pause()\n\n try:\n # Test read\n query = SimpleStatement(\"SELECT * FROM test\", consistency_level=ConsistencyLevel.ALL)\n with self.assertRaises(ReadTimeout):\n self.session.execute(query, timeout=None)\n self.assertEqual(1, self.cluster.metrics.stats.read_timeouts)\n\n finally:\n get_node(1).resume()",
"def wait(self, timeout=None):\n deadline = None if timeout is None else time.time() + timeout\n while True:\n # Because poll is guarded by the lock, we can just use busy-loop with\n # 0.1 secs interval (chosen heuristically).\n result = self.poll()\n if (result is not None or\n (deadline is not None and time.time() >= deadline)):\n # If subprocess is terminated or timed out, return the result.\n return result\n time.sleep(0.1)",
"def readLock(self) -> \"int\":\n return _coin.SbRWMutex_readLock(self)",
"def acquire_lock (self):\n\n try:\n self.cache[self.id].lock.acquire ()\n self.locked = True\n except KeyError:\n pass",
"def _read_non_blocking(queue, timeout):\n while True:\n # read line without blocking\n try: \n yield queue.get(timeout=timeout) # or queue.get_nowait()\n timeout = 0\n except Empty:\n return # done enumerating",
"def get(self, timeout=None):\n\n try:\n res = self._q.get(timeout=timeout)\n except Queue.Empty:\n raise multiprocessing.TimeoutError(\"Timed out\")\n\n if isinstance(res, Exception):\n raise res\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Acquire a write lock for the current thread, waiting at most timeout seconds or doing a nonblocking check in case timeout is <= 0. In case the write lock cannot be serviced due to the deadlock condition mentioned above, a ValueError is raised. In case timeout is None, the call to acquireWrite blocks until the lock request can be serviced. In case the timeout expires before the lock could be serviced, a RuntimeError is thrown.
|
def acquireWrite(self, timeout=None):
if timeout is not None:
endtime = time() + timeout
me, upgradewriter = current_thread(), False
self.__condition.acquire()
try:
if self.__writer is me:
# If we are the writer, grant a new write lock, always.
self.__writercount += 1
return
elif me in self.__readers:
# If we are a reader, no need to add us to pendingwriters,
# we get the upgradewriter slot.
if self.__upgradewritercount:
# If we are a reader and want to upgrade, and someone
# else also wants to upgrade, there is no way we can do
# this except if one of us releases all his read locks.
# Signal this to user.
if timeout is not None:
raise RuntimeError("Write lock upgrade would deadlock until timeout")
else:
raise ValueError("Inevitable dead lock, denying write lock")
upgradewriter = True
self.__upgradewritercount = self.__readers.pop(me)
else:
# We aren't a reader, so add us to the pending writers queue
# for synchronization with the readers.
self.__pendingwriters.append(me)
while True:
if not self.__readers and self.__writer is None:
# Only test anything if there are no readers and writers.
if self.__upgradewritercount:
if upgradewriter:
# There is a writer to upgrade, and it's us. Take
# the write lock.
self.__writer = me
self.__writercount = self.__upgradewritercount + 1
self.__upgradewritercount = 0
return
# There is a writer to upgrade, but it's not us.
# Always leave the upgrade writer the advance slot,
# because he presumes he'll get a write lock directly
# from a previously held read lock.
elif self.__pendingwriters[0] is me:
# If there are no readers and writers, it's always
# fine for us to take the writer slot, removing us
# from the pending writers queue.
# This might mean starvation for readers, though.
self.__writer = me
self.__writercount = 1
self.__pendingwriters = self.__pendingwriters[1:]
return
if timeout is not None:
remaining = endtime - time()
if remaining <= 0:
# Timeout has expired, signal caller of this.
if upgradewriter:
# Put us back on the reader queue. No need to
# signal anyone of this change, because no other
# writer could've taken our spot before we got
# here (because of remaining readers), as the test
# for proper conditions is at the start of the
# loop, not at the end.
self.__readers[me] = self.__upgradewritercount
self.__upgradewritercount = 0
else:
# We were a simple pending writer, just remove us
# from the FIFO list.
self.__pendingwriters.remove(me)
raise RuntimeError("Acquiring write lock timed out")
self.__condition.wait(remaining)
else:
self.__condition.wait()
finally:
self.__condition.release()
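A hedged sketch of the upgrade behaviour described above: two threads each take a read lock and then ask for a write lock. The first upgrader claims the single upgrade-writer slot and waits for the other reader to go away; because the second upgrader cannot be granted without one of them dropping its read locks, its untimed request is refused with ValueError, while the timed request of the first eventually raises RuntimeError here since no read lock is ever released in this sketch. ReadWriteLock is again an assumed class name and release calls are omitted because they are not part of the excerpt.

import threading
import time

rwlock = ReadWriteLock()  # assumed class name

def upgrader(name, hold, timeout):
    rwlock.acquireRead()               # become a reader first
    time.sleep(hold)                   # keep the read lock while the other thread upgrades
    try:
        rwlock.acquireWrite(timeout=timeout)
        print(name, "upgraded to a write lock")
    except ValueError as exc:          # the upgrade slot is already taken by the other reader
        print(name, "denied:", exc)
    except RuntimeError as exc:        # timed out waiting for the other reader to go away
        print(name, "timed out:", exc)

threading.Thread(target=upgrader, args=("t1", 0.2, 1.0)).start()
threading.Thread(target=upgrader, args=("t2", 0.4, None)).start()
# Expected: t2 is denied with "Inevitable dead lock, denying write lock" because t1
# already holds the upgrade slot; t1 then times out after about one second because
# t2 never releases its read lock in this sketch.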
|
[
"def test_write_lock_acquired(self) -> None:\n # First to acquire this lock, so it should complete\n lock = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n assert lock is not None\n\n # Enter the context manager\n self.get_success(lock.__aenter__())\n\n # Attempting to acquire the lock again fails, as both read and write.\n lock2 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n self.assertIsNone(lock2)\n\n lock3 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=False)\n )\n self.assertIsNone(lock3)\n\n # Calling `is_still_valid` reports true.\n self.assertTrue(self.get_success(lock.is_still_valid()))\n\n # Drop the lock\n self.get_success(lock.__aexit__(None, None, None))\n\n # We can now acquire the lock again.\n lock4 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n assert lock4 is not None\n self.get_success(lock4.__aenter__())\n self.get_success(lock4.__aexit__(None, None, None))",
"def acquireRead(self, blocking=True, timeout=None):\r\n\r\n if not blocking:\r\n endtime = -1\r\n elif timeout is not None:\r\n endtime = time() + timeout\r\n else:\r\n endtime = None\r\n me = current_thread()\r\n self.__condition.acquire()\r\n try:\r\n if self.__writer is me:\r\n # If we are the writer, grant a new read lock, always.\r\n self.__writercount += 1\r\n return\r\n while True:\r\n if self.__writer is None:\r\n # Only test anything if there is no current writer.\r\n if self.__upgradewritercount or self.__pendingwriters:\r\n if me in self.__readers:\r\n # Only grant a read lock if we already have one\r\n # in case writers are waiting for their turn.\r\n # This means that writers can't easily get starved\r\n # (but see below, readers can).\r\n self.__readers[me] += 1\r\n return\r\n # No, we aren't a reader (yet), wait for our turn.\r\n else:\r\n # Grant a new read lock, always, in case there are\r\n # no pending writers (and no writer).\r\n self.__readers[me] = self.__readers.get(me, 0) + 1\r\n return\r\n if timeout is not None:\r\n remaining = endtime - time()\r\n if remaining <= 0:\r\n # Timeout has expired, signal caller of this.\r\n raise RuntimeError(\"Acquiring read lock timed out\")\r\n self.__condition.wait(remaining)\r\n else:\r\n self.__condition.wait()\r\n finally:\r\n self.__condition.release()",
"async def acquire(self, blocking=None, blocking_timeout=None):\n sleep = self.sleep\n token = b(uuid.uuid1().hex)\n if blocking is None:\n blocking = self.blocking\n if blocking_timeout is None:\n blocking_timeout = self.blocking_timeout\n blocking_timeout = blocking_timeout or self.timeout\n stop_trying_at = mod_time.time() + min(blocking_timeout, self.timeout)\n\n while True:\n if await self.do_acquire(token):\n lock_acquired_at = mod_time.time()\n if await self.check_lock_in_slaves(token):\n check_finished_at = mod_time.time()\n # if time expends on acquiring lock is greater than given time\n # the lock should be released manually\n if check_finished_at > stop_trying_at:\n await self.do_release(token)\n return False\n self.local.token = token\n # validity time is considered to be the\n # initial validity time minus the time elapsed during check\n await self.do_extend(lock_acquired_at - check_finished_at)\n return True\n else:\n await self.do_release(token)\n return False\n if not blocking or mod_time.time() > stop_trying_at:\n return False\n await asyncio.sleep(sleep, loop=self.redis.connection_pool.loop)",
"def test_write_timeout(self):\n\n # Test write\n self.session.execute(\"INSERT INTO test (k, v) VALUES (1, 1)\")\n\n # Assert read\n query = SimpleStatement(\"SELECT * FROM test WHERE k=1\", consistency_level=ConsistencyLevel.ALL)\n results = execute_until_pass(self.session, query)\n self.assertTrue(results)\n\n # Pause node so it shows as unreachable to coordinator\n get_node(1).pause()\n\n try:\n # Test write\n query = SimpleStatement(\"INSERT INTO test (k, v) VALUES (2, 2)\", consistency_level=ConsistencyLevel.ALL)\n with self.assertRaises(WriteTimeout):\n self.session.execute(query, timeout=None)\n self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)\n\n finally:\n get_node(1).resume()",
"def Exclusive(self, blocking=False, timeout=None):\n self._flock(fcntl.LOCK_EX, blocking, timeout,\n \"Failed to lock %s in exclusive mode\" % self.filename)",
"def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n first.acquire(\"A\")\n with pytest.raises(Timeout):\n with lock(BASE_URL, \"A\", timeout=timedelta(seconds=1)):\n pass",
"def writeLock(self) -> \"int\":\n return _coin.SbRWMutex_writeLock(self)",
"def lock_write(self):\n token = self._call(\"lockWrite\")\n token = IToken(token)\n return token",
"def acquire(self, timeout=None):\n if timeout is None:\n # Wait forever (INFINITE)\n timeout = 0xFFFFFFFF\n else:\n timeout = int(round(timeout * 1000))\n ret = _WaitForSingleObject(self.handle, timeout)\n if ret in (0, 0x80):\n # Note that this doesn't distinguish between normally acquired (0) and\n # acquired due to another owning process terminating without releasing (0x80)\n self.acquired = True\n return True\n elif ret == 0x102:\n # Timeout\n self.acquired = False\n return False\n else:\n # Waiting failed\n raise ctypes.WinError()",
"def wait_and_lock():\n # Waits forever to get a lock on the lockfile\n # If an unrelated error occures a exception is raised \n self._f = open(self._filename, 'w')\n while true:\n try:\n fcntl.flock(filename, fcntl.LOCK_EX | dcnt.LOCK_NM)\n return\n except IOError as e:\n if e.errno == errno.EAGAIN:\n # Do not raise error when waiting to aquire lock\n time.sleep(0.1)\n else\n # Raise on all unrelated errors\n raise",
"def needs_write_lock(func):\n def locked(self, *args, **kwargs):\n self.lock_for_write()\n try:\n return func(self, *args, **kwargs)\n finally:\n self.unlock()\n locked.__doc__ = func.__doc__\n locked.__name__ = func.__name__\n return locked",
"def wait(self, timeout=None, *throw_args):\n if self.value is not _NOT_USED:\n if self._exc is None:\n return self.value\n else:\n api.getcurrent().throw(*self._exc)\n if timeout is not None:\n timer = api.timeout(timeout, *throw_args)\n timer.__enter__()\n if timeout==0:\n if timer.__exit__(None, None, None):\n return\n else:\n try:\n api.getcurrent().throw(*timer.throw_args)\n except:\n if not timer.__exit__(*sys.exc_info()):\n raise\n return\n EXC = True\n try:\n try:\n waiter = Waiter()\n self.link(waiter)\n try:\n return waiter.wait()\n finally:\n self.unlink(waiter)\n except:\n EXC = False\n if timeout is None or not timer.__exit__(*sys.exc_info()):\n raise\n finally:\n if timeout is not None and EXC:\n timer.__exit__(None, None, None)",
"def acquire_write(self):\n with self.monitor:\n if self.tlocal.rcount > 0:\n return self._promote()\n else:\n return self._acquire_write()",
"def select_write(self, timeout=None):\n\n _, write_ready, _ = select.select([], [self._socket], [], timeout)\n return len(write_ready) > 0",
"def test_write_locked(self):\n self.create_file_blank(self.FILENAME)\n self.lock_file(self.FILENAME)\n try:\n fileio.writeline(self.FILENAME, 1, \"data\")\n self.fail(\"Did not get expected exception\")\n except:\n pass # print(\"expected exception\")\n finally:\n self.unlock_file(self.FILENAME)",
"def test_read_lock_acquired(self) -> None:\n # First to acquire this lock, so it should complete\n lock = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=False)\n )\n assert lock is not None\n\n # Enter the context manager\n self.get_success(lock.__aenter__())\n\n # Attempting to acquire the write lock fails\n lock2 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n self.assertIsNone(lock2)\n\n # Attempting to acquire a read lock succeeds\n lock3 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=False)\n )\n assert lock3 is not None\n self.get_success(lock3.__aenter__())\n\n # Calling `is_still_valid` reports true.\n self.assertTrue(self.get_success(lock.is_still_valid()))\n\n # Drop the first lock\n self.get_success(lock.__aexit__(None, None, None))\n\n # Attempting to acquire the write lock still fails, as lock3 is still\n # active.\n lock4 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n self.assertIsNone(lock4)\n\n # Drop the still open third lock\n self.get_success(lock3.__aexit__(None, None, None))\n\n # We can now acquire the lock again.\n lock5 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n assert lock5 is not None\n self.get_success(lock5.__aenter__())\n self.get_success(lock5.__aexit__(None, None, None))",
"def send_until_writable(timeout=0.5):\n\n def inner(f, socket, message):\n st = time.perf_counter()\n while time.perf_counter() - st < timeout:\n if check_writable(socket):\n return f(message)\n\n return inner",
"def tryWriteLock(self) -> \"SbBool\":\n return _coin.SbRWMutex_tryWriteLock(self)",
"def lock_and_wait(path, action_name, timeout=5.0, delay=2.0):\n LOG.info('%s %s: locking and waiting %0.1f seconds', path, action_name, delay)\n lock = FileLock(path, timeout=timeout)\n with lock:\n time.sleep(delay)\n LOG.info('%s: unlocked', path)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Open a dialogue box (dialog) using a program appropriate to the desktop environment in use. If the optional 'desktop' parameter is specified then attempt to use that particular desktop environment's mechanisms to open the dialog instead of guessing or detecting which environment is being used. Suggested values for 'desktop' are "standard", "KDE", "GNOME", "Mac OS X", "Windows". The result of the dialogue interaction may be a string indicating user input (for Input, Password, Menu, Pulldown), a list of strings indicating selections of one or more items (for RadioList, CheckList), or a value indicating true or false (for Question, Warning, Message, Error). Where a string value may be expected but no choice is made, an empty string may be returned. Similarly, where a list of values is expected but no choice is made, an empty list may be returned.
|
def open(self, desktop=None):
    # Decide on the desktop environment in use.
    desktop_in_use = use_desktop(desktop)
    # Get the program.
    try:
        program = self.commands[desktop_in_use]
    except KeyError:
        raise OSError("Desktop '%s' not supported (no known dialogue box command could be suggested)" % desktop_in_use)
    # The handler is one of the functions communicating with the subprocess.
    # Some handlers return boolean values, others strings.
    handler, options = self.info[program]
    cmd = [program]
    for option in options:
        if isinstance(option, str):
            cmd.append(option)
        else:
            value = getattr(self, option.name, None)
            cmd += option.convert(value, program)
    return handler(cmd, 0)
|
[
"def user32_OpenDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"dwFlags\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_SwitchDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def littleDialog():\r\n psm = uno.getComponentContext().ServiceManager\r\n dp = psm.createInstance(\"com.sun.star.awt.DialogProvider\")\r\n dlg = dp.createDialog(\"vnd.sun.star.script:Standard.Dialog1?location=application\")\r\n dlg.execute()\r\n return None",
"def execute_desktop(deskfile):\n import re\n import os\n from subprocess import Popen\n\n deskfile = os.path.expanduser(deskfile)\n deskfile = os.path.expandvars(deskfile)\n\n try:\n data = open(deskfile, 'r').read()\n cmd= re.findall('Exec=(.*)', data)[0]\n #print cmd\n Popen(cmd.split())\n return True\n except Exception as err:\n logger.exception(err)\n #print err\n return False",
"def user32_OpenInputDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwFlags\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_CreateDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"lpszDevice\", \"pDevmode\", \"dwFlags\", \"dwDesiredAccess\", \"lpsa\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_CreateDesktopEx(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"lpszDevice\", \"pDevmode\", \"dwFlags\", \"dwDesiredAccess\", \"lpsa\", \"ulHeapSize\", \"pvoid\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def createDesktopFile(command=\"\", server=None, itype='work'):\n\ticon = Config[itype]['icon']\n\textra = Config[itype]['extra']\n\n\tname=\"%s%s\"%(Config['sshPrefix'], server)\n\tif command is not None:\n\t\tcomment=\"Connect to %s (%s)\"%(server, command)\n\t\texe=\"xterm {extra} -T {server} -name xterm -class {server} -e ssh {command}\".format(extra=extra, server=server, command=command)\n\telse:\n\t\tcomment=\"Connect to %s\"%(server)\n\t\texe=\"xterm {extra} -T {server} -name xterm -class {server} -e ssh {server}\".format(extra=extra, server=server)\n\n\tdeskfile=\"\"\"[Desktop Entry]\nName={name}\nComment={comment}\nExec={exe}\nTerminal=false\nType=Application\n#Encoding=UTF-8\nIcon={icon}\n#StartupWMClass=work\n\"\"\".format(name=name, comment=comment,exe=exe,icon=icon)\n\n\texisted = False\n\tnewFile = str(Config['appDir']) + str(Config['sshPrefix']) + str(server) + \".desktop\"\n\tif os.path.isfile(newFile) :\n\t\texisted = True\n\t\tres = query_yes_no(\"File [%s%s.desktop] already exists. Overwrite?\"%(Config['sshPrefix'], server))\n\t\tif res == True:\n\t\t\tprint \"Will overwrite file\"\n\t\telse:\n\t\t\tprint \"Exiting\"\n\t\t\tsys.exit(0)\n\n\tf = open(newFile, 'w+')\n\tf.write(deskfile)\n\tf.close()\n\n\tprint \"File written to [%s]\"%newFile\n\tprint \"\"\n\tprint deskfile\n\n\tif existed == False:\n\t\taddToMenu(str(Config['sshPrefix']) + str(server) + \".desktop\", itype)\n\telse:\n\t\tprint 'Not adding to menu file because it already existed'",
"def createDesktopFile(command=\"\", server=None, itype='work'):\n icon = Config[itype]['icon']\n extra = Config[itype]['extra']\n menu = Config[itype]['menu']\n\n name=\"%s%s\"%(Config['sshPrefix'], server)\n if command is not None:\n comment=\"Connect to %s (%s)\"%(server, command)\n exe=\"{cmd} {extra} --disable-factory --app-id com.sshmenu.{server} -e \\\"ssh {command}\\\"\".format(cmd=Config['gt-command'],extra=extra, server=server, command=command)\n else:\n comment=\"Connect to %s\"%(server)\n exe=\"{cmd} {extra} --disable-factory --app-id com.sshmenu.{server} -e \\\"ssh {server}\\\"\".format(cmd=Config['gt-command'],extra=extra, server=server)\n\n deskfile=\"\"\"[Desktop Entry]\nName={name}\nComment={comment}\nExec={exe}\nIcon={icon}\nType=Application\nCategories={menu};System;TerminalEmulator;\nStartupNotify=true\nX-GNOME-SingleWindow=false\nOnlyShowIn=GNOME;Unity;\nActions=New\nX-Ubuntu-Gettext-Domain=gnome-terminal\nStartupWMClass={server}\n\"\"\".format(name=name, comment=comment,exe=exe,icon=icon,menu=menu,server=server)\n\n existed = False\n newFile = str(Config['appDir']) + str(Config['sshPrefix']) + str(server) + \".desktop\"\n if os.path.isfile(newFile) :\n existed = True\n res = query_yes_no(\"File [%s%s.desktop] already exists. Overwrite?\"%(Config['sshPrefix'], server))\n if res == True:\n print \"Will overwrite file\"\n else:\n print \"Exiting\"\n sys.exit(0)\n\n f = open(newFile, 'w+')\n f.write(deskfile)\n f.close()\n\n print \"File written to [%s]\"%newFile\n print \"\"\n print deskfile",
"def createdesktop(self, authinfo, userinfo, **kwargs):\n\n myDesktop = None # default return object \n\n args = kwargs.get('args')\n image = kwargs.get('image')\n command = kwargs.get('command')\n env = kwargs.get('env', {} )\n appname = kwargs.get('appname')\n preferednodehostname = kwargs.get('preferednodehostname', None )\n\n vncPassword = self.mkvnc_password()\n args.extend('--vncpassword {}'.format(vncPassword).split(' '))\n \n labels = { 'access_provider': authinfo.provider,\n 'access_userid': userinfo.userid,\n 'access_username': self.get_labelvalue(userinfo.name),\n 'domain': self.endpoint_domain,\n 'vnc_password': vncPassword }\n\n # check if we run the desktop in metappli mode or desktop mode\n if type(appname) is str :\n # if appname is set then create a metappli labels\n # this will change and run the app\n labels[ 'type' ] = self.x11servertype_embeded\n kwargs[ 'type' ] = self.x11servertype_embeded\n # this is specific to metappli mode\n labels[ 'appname' ] = appname\n else:\n # if appname is None then create a desktop\n # set value as default type x11servertype\n labels[ 'type' ] = self.x11servertype\n kwargs[ 'type' ] = self.x11servertype\n\n myuuid = str(uuid.uuid4())\n pod_name = self.get_podname( str(uuid.uuid4() ) ) \n container_name = self.get_graphicalcontainername( myuuid ) \n\n # envdict to envlist\n envlist = []\n for k, v in env.items():\n # need to convert v as str : kubernetes supports ONLY string type to env value\n envlist.append( { 'name': k, 'value': str(v) } )\n\n\n '''\n env:\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n '''\n envlist.append( { 'name': 'NODE_NAME', 'valueFrom': { 'fieldRef': { 'fieldPath':'spec.nodeName' } } } )\n envlist.append( { 'name': 'POD_NAME', 'valueFrom': { 'fieldRef': { 'fieldPath':'metadata.name' } } } )\n envlist.append( { 'name': 'POD_NAMESPACE', 'valueFrom': { 'fieldRef': { 'fieldPath':'metadata.namespace' } } } )\n envlist.append( { 'name': 'POD_IP', 'valueFrom': { 'fieldRef': { 'fieldPath':'status.podIP' } } } )\n\n\n self.on_desktoplaunchprogress('Building data storage for your desktop')\n (volumes, volumeMounts) = self.build_desktopvolumes( authinfo, userinfo, **kwargs)\n list_volumes = list( volumes.values() )\n list_volumeMounts = list( volumeMounts.values() )\n self.logger.info( 'volumes=%s', volumes.values() )\n self.logger.info( 'volumeMounts=%s', volumeMounts.values() )\n\n initContainers = []\n\n if oc.od.settings.desktopuseinitcontainer is True and \\\n type(oc.od.settings.desktopuseinitcontainercommand) is list and \\\n type(oc.od.settings.desktopinitcontainerimage) is str :\n # init container chown to change the owner of the home directory\n init_name = 'init' + pod_name\n initContainers.append( { 'imagePullPolicy': 'IfNotPresent',\n 'name': init_name,\n 'image': oc.od.settings.desktopinitcontainerimage,\n 'command': oc.od.settings.desktopuseinitcontainercommand,\n 'volumeMounts': list_volumeMounts\n } )\n self.logger.debug( 'initContainers is %s', initContainers)\n\n\n nodeselector = {}\n if type(preferednodehostname) is str :\n nodeselector.update( { 'kubernetes.io/hostname': preferednodehostname } )\n if type(oc.od.settings.desktopnodeselector) is dict:\n nodeselector.update( oc.od.settings.desktopnodeselector )\n\n pod_manifest = {\n 'apiVersion': 'v1',\n 'kind': 
'Pod',\n 'metadata': {\n 'name': pod_name,\n 'namespace': self.namespace,\n 'labels': labels,\n **self.get_annotations_lastlogin_datetime()\n },\n 'spec': {\n 'subdomain': self.endpoint_domain,\n 'shareProcessNamespace': oc.od.settings.desktopusershareprocessnamespace,\n 'volumes': list_volumes, \n 'nodeSelector': oc.od.settings.desktopnodeselector, \n 'initContainers': initContainers,\n 'containers': [ { \n 'imagePullPolicy': 'IfNotPresent',\n 'image': image,\n 'name': container_name,\n 'command': command,\n 'args': args,\n 'env': envlist,\n 'volumeMounts': list_volumeMounts,\n 'securityContext': \n { \n # permit sudo command inside the container False by default \n 'allowPrivilegeEscalation': oc.od.settings.desktopallowPrivilegeEscalation,\n # to permit strace call 'capabilities': { 'add': [\"SYS_ADMIN\", \"SYS_PTRACE\"] \n 'capabilities': oc.od.settings.desktopcapabilities\n } \n } \n ],\n }\n }\n\n if oc.od.settings.desktopimagepullsecret:\n pod_manifest['spec']['imagePullSecrets'] = [ { 'name': oc.od.settings.desktopimagepullsecret } ]\n\n if oc.od.settings.desktopuseprintercontainer is True and type(oc.od.settings.desktopprinterimage) is str :\n # get the container sound name prefix with 'p' like sound\n container_printer_name = self.get_printercontainername( myuuid )\n pod_manifest['spec']['containers'].append( { \n 'name': container_printer_name,\n 'imagePullPolicy': 'IfNotPresent',\n 'image': oc.od.settings.desktopprinterimage, \n 'env': envlist,\n 'volumeMounts': list_volumeMounts \n } \n )\n \n if oc.od.settings.desktopusesoundcontainer is True and type(oc.od.settings.desktopsoundimage) is str :\n # get the container sound name prefix with 's' like sound\n container_sound_name = self.get_soundcontainername( myuuid )\n # pulseaudio need only shared volume \n # /tmp for the unix socket \n # /dev/shm for the share memory\n # this is a filter to reduce surface attack\n soundcontainerlist_volumeMounts = [ {'mountPath': '/dev/shm', 'name': 'shm'}, \n {'mountPath': '/tmp', 'name': 'tmp'} ]\n pod_manifest['spec']['containers'].append( { \n 'name': container_sound_name,\n 'imagePullPolicy': 'IfNotPresent',\n 'image': oc.od.settings.desktopsoundimage, \n 'env': envlist,\n 'volumeMounts': soundcontainerlist_volumeMounts \n } \n )\n\n # if metapply stop, do not restart the pod \n if kwargs[ 'type' ] == self.x11servertype_embeded :\n pod_manifest['spec']['restartPolicy'] = 'Never'\n\n self.logger.info( pod_manifest )\n\n # we are ready to create our Pod \n myDesktop = None\n try:\n nMaxEvent = 64\n nEventCount = 0\n \n self.on_desktoplaunchprogress('Creating your desktop')\n pod = self.kubeapi.create_namespaced_pod(namespace=self.namespace,body=pod_manifest )\n\n if type(pod) is not client.models.v1_pod.V1Pod:\n self.on_desktoplaunchprogress('Create Pod failed.' 
)\n raise ValueError( 'Invalid create_namespaced_pod type')\n\n try: \n self.logger.info( 'Start watching events' )\n self.on_desktoplaunchprogress('Watching for events from services')\n w = watch.Watch() \n for event in w.stream( self.kubeapi.list_namespaced_pod, \n namespace=self.namespace, \n field_selector='metadata.name=' + pod_name ): \n event_type = event.get('type')\n if event_type is None:\n # nothing to do \n continue\n\n self.logger.info('count=%d event_type=%s ', nEventCount, event['type'] )\n self.on_desktoplaunchprogress('{} event received', event_type.lower() )\n\n nEventCount +=1\n if nEventCount > nMaxEvent: \n w.stop() \n \n if event_type == 'ADDED':\n self.logger.info('event type ADDED received')\n # the pod has been added\n # wait for next MODIFIED event type\n \n if event_type == 'MODIFIED':\n self.logger.info('event type MODIFIED received')\n # pod_event = w.unmarshal_event( data=event['object'], return_type=type(pod) )\n\n pod_event = event.get('object')\n \n if type(pod_event) == type(pod) : \n self.on_desktoplaunchprogress('Install process can take up to 10 s. Status is {}:{}', pod_event.status.phase, event_type.lower() )\n if pod_event.status.phase != 'Pending' :\n self.logger.info('Stop event')\n w.stop() \n \n except ApiException as e:\n self.logger.debug(\"Exception when calling CoreV1Api->list_namespaced_pod: %s\\n\", str(e) )\n\n\n self.logger.debug( \"%d/%d\", nEventCount, nMaxEvent )\n \n myPod = self.kubeapi.read_namespaced_pod(namespace=self.namespace,name=pod_name) \n self.on_desktoplaunchprogress('Your desktop phase is {}.', myPod.status.phase.lower() )\n \n self.logger.info( 'myPod %s', myPod)\n self.logger.info( 'myPod.metadata.name is %s, ipAddr is %s', myPod.metadata.name, myPod.status.pod_ip)\n\n myDesktop = self.pod2desktop( myPod )\n\n except ApiException as e:\n self.logger.error( str(e) )\n\n \n return myDesktop",
"def user32_EnumDesktopWindows(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\", \"lpfn\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_desktop_client():\n print(\"Emulating desktop app\")\n\n consumer_key = input('Please enter consumer key: > ')\n consumer_secret = input('Please enter key secret: > ')\n config = upwork.Config({'consumer_key': consumer_key, 'consumer_secret': consumer_secret})\n \"\"\"Assign access_token and access_token_secret if they are known\n config = upwork.Config({\\\n 'consumer_key': 'xxxxxxxxxxx',\\\n 'consumer_secret': 'xxxxxxxxxxx',\\\n 'access_token': 'xxxxxxxxxxx',\\\n 'access_token_secret': 'xxxxxxxxxxx'})\n \"\"\"\n\n client = upwork.Client(config)\n\n try:\n config.access_token\n config.access_token_secret\n except AttributeError:\n verifier = input(\n 'Please enter the verification code you get '\n 'following this link:\\n{0}\\n\\n> '.format(\n client.get_authorization_url()))\n\n print('Retrieving keys.... ')\n access_token, access_token_secret = client.get_access_token(verifier)\n print('OK')\n\n # For further use you can store ``access_toket`` and\n # ``access_token_secret`` somewhere\n\n return client",
"def user32_PaintDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hdc\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_CloseDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def change_desktop_path(self, master):\n master.withdraw()\n answer = filedialog.askdirectory(title=SELECT_DESKTOP_TITLE)\n if answer:\n global user_desktop_path\n user_desktop_path = answer.replace('\\\\', '/')\n self.write_data()",
"def user32_SetThreadDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def change_window_desktop(self, window: xlib.Window, desktop: int) -> None:\n if desktop < 0:\n return\n\n self._send_event(window=window, mtype=self.atom[\"_NET_WM_DESKTOP\"], data=[desktop])\n self._flush()",
"def user32_EnumDesktops(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hwinsta\", \"lpEnumFunc\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def create_desktop_file(values, template):\n app_dir = os.path.join(os.environ.get(\"HOME\"), \".local/share/applications\")\n\n # Read the file\n with open(template, \"r\") as file:\n desktop_file_content = Template(file.read()).render(values)\n\n # Write the .desktop file\n desktop_file = os.path.join(app_dir, \"{}.desktop\".format(values[\"pkg_name\"]))\n with open(desktop_file, \"w\") as file:\n file.write(desktop_file_content)\n\n # Make it executable\n os.chmod(desktop_file, 0o755)",
"def resumedesktop(self, authinfo, userinfo, **kwargs):\n self.logger.info('')\n myDesktop = None\n myPod = self.findPodByUser(authinfo, userinfo)\n\n if myPod is None : \n self.logger.info( 'Pod name not found for user %s ', userinfo.userid )\n else:\n newlabel = {\"metadata\": self.get_annotations_lastlogin_datetime() }\n v1newPod = self.kubeapi.patch_namespaced_pod( name=myPod.metadata.name, \n namespace=self.namespace, \n body=newlabel )\n myDesktop = self.pod2desktop( v1newPod )\n return myDesktop"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialise a menu with the given heading 'text', column 'titles', and optional 'items' (which may be added later), 'width' (in characters), 'height' (in characters) and 'list_height' (in items).
|
def __init__(self, text, titles, items=None, width=None, height=None, list_height=None):
    Simple.__init__(self, text, width, height)
    self.titles = ([""] * self.number_of_titles + titles)[-self.number_of_titles:]
    self.items = items or []
    self.list_height = list_height
|
[
"def __init__(self, menu_list, attr, pos, body):\n \n content = [urwid.AttrWrap(SelText(\" \" + w), None, attr[1])\n for w in menu_list]\n\n #Calculate width and height of the menu widget:\n height = len(menu_list)\n width = 0\n for entry in menu_list:\n if len(entry) > width:\n width = len(entry)\n\n #Create the ListBox widget and put it on top of body:\n self._listbox = urwid.AttrWrap(urwid.ListBox(content), attr[0])\n overlay = urwid.Overlay(self._listbox, body, ('fixed left', pos[0]),\n width + 2, ('fixed top', pos[1]), height)\n\n urwid.WidgetWrap.__init__(self, overlay)",
"def buildMenu(item):\n\n # fill the marking menu items\n name = item['name']\n subMenu = item['subMenu']\n position = item['position']\n # to be added to each item to correctly close the marking menu\n onCloseCommand = ';import dmptools.setup.markingMenu as markingMenu;markingMenu.deleteMarkingMenu()'\n # create item\n if position:\n command = item['command'].replace('python(\"', '').replace('\");', '')\n cmds.menuItem(\n label=name,\n subMenu=subMenu,\n command=command+onCloseCommand,\n enable=True,\n data=0,\n boldFont=False,\n radialPosition=position,\n enableCommandRepeat=True,\n image=\"commandButton.png\",\n echoCommand=1,\n sourceType=\"python\",\n )\n else:\n if name == 'separator':\n cmds.menuItem(divider=True)\n else:\n command = item['command'].replace('python(\"', '').replace('\");', '')\n cmds.menuItem(\n label=name,\n subMenu=subMenu,\n command=command+onCloseCommand,\n enable=True,\n data=0,\n boldFont=False,\n enableCommandRepeat=True,\n image=\"commandButton.png\",\n echoCommand=1,\n sourceType=\"python\",\n )",
"def build_menu(self):\r\n menubar = Menu(self)\r\n \r\n # File Menu with Commands\r\n filemenu = Menu(menubar, tearoff=0)\r\n \r\n filemenu.add_command(label=\"New List\", command=self.new_list)\r\n filemenu.add_command(label=\"Import MySQL Database\", command=self.import_db)\r\n filemenu.add_command(label=\"Import CSV File\", command=self.import_csv)\r\n \r\n filemenu.add_separator()\r\n \r\n filemenu.add_command(label=\"Export MySQLDatabase\", command=self.export_db)\r\n filemenu.add_command(label=\"Export CSV\", command=self.export_csv)\r\n \r\n filemenu.add_separator()\r\n\r\n filemenu.add_command(label=\"Exit\", command=self.destroy)\r\n \r\n # Edit Menu with Commands\r\n editmenu = Menu(menubar, tearoff=0)\r\n \r\n editmenu.add_command(label=\"Insert Row Above\", command=self.insert_row)\r\n editmenu.add_command(label=\"Delete Current Row\", command=self.delete_row)\r\n editmenu.add_command(label=\"Update Current Row\", command=self.edit_row)\r\n \r\n menubar.add_cascade(label=\"File\", menu=filemenu)\r\n menubar.add_cascade(label=\"Edit\", menu=editmenu)\r\n \r\n helpmenu = Menu(menubar, tearoff=0)\r\n helpmenu.add_command(label=\"About...\", command=self.open_about)\r\n menubar.add_cascade(label=\"Help\", menu=helpmenu)\r\n\r\n self.config(menu=menubar)",
"def __init__(self):\n\n self._menu = Menu()\n self._menu.add_menu_item('b', 'Binary', None)\n self._menu.add_menu_item('o', 'Octal', None)\n self._menu.add_menu_item('d', 'Decimal', None)\n self._menu.add_menu_item('h', 'Hexadecimal', None)",
"def _build_menus(self):\n debug('Timeline._build_menus')\n self.menu=tk.Menu(self.root, tearoff=0)\n #self.menu.add_command(label=\"Status\", command=self._set_status_text_for_item)\n #self.menu.add_separator()\n #self.menu.add_command(label=\"Rename\", command=self._open_item_rename_form)",
"def __init__(self, columns, title=None, max_width=None,\n include_headings=True, use_row_separators=True,\n column_margin=1):\n if type(max_width) is int:\n if max_width <= 0:\n raise ValueError(f'invalid maximum table width: {max_width}')\n min_width = sum((col.width for col in columns if col.width),\n len(columns) * (2 * column_margin + 2) + 1)\n if max_width < min_width:\n raise ValueError(\n f'maximum table width is too narrow: {max_width}')\n self.columns = columns\n self.title = title\n self.use_row_separators = use_row_separators\n self.max_width = max_width\n self.include_headings = include_headings\n self.column_margin = column_margin\n self._text_lines = []\n self.data = []",
"def _create_command_menu(self):\n f1 = urwid.Button('Jump', on_press=self.button_show_jump)\n f2 = urwid.Button('Sell', on_press=self.button_show_sell)\n f3 = urwid.Button('Buy', on_press=self.button_show_buy)\n f4 = urwid.Button('Upgrade', on_press=self.button_show_equip)\n f5 = urwid.Button('Galaxy', on_press=self.button_show_galaxy)\n f6 = urwid.Button('Locals', on_press=self.button_show_locals)\n f7 = urwid.Button('System', on_press=self.button_show_planet_info)\n f8 = urwid.Button('Market', on_press=self.button_show_market)\n f9 = urwid.Button('Status', on_press=self.button_show_status)\n f0 = urwid.Button('Cargo', on_press=self.button_show_cargo)\n buttons = [f1, f2, f3, f4, f5, f6, f7, f8, f9, f0]\n buttons = (urwid.AttrMap(b, 'button') for b in buttons)\n menu = urwid.Columns(buttons)\n menu.focus_position = 8\n return menu",
"def __init__(self, key, text=\"\", links=None, linktexts=None,\r\n keywords=None, cols=1, helptext=None,\r\n selectcmds=None, code=\"\", nodefaultcmds=False, separator=\"\"):\r\n self.key = key\r\n self.cmdset = None\r\n self.links = links\r\n self.linktexts = linktexts\r\n self.keywords = keywords\r\n self.cols = cols\r\n self.selectcmds = selectcmds\r\n self.code = code\r\n self.nodefaultcmds = nodefaultcmds\r\n self.separator = separator\r\n Nlinks = len(self.links)\r\n\r\n # validate the input\r\n if not self.links:\r\n self.links = []\r\n if not self.linktexts or (len(self.linktexts) != Nlinks):\r\n self.linktexts = [None for i in range(Nlinks)]\r\n if not self.keywords or (len(self.keywords) != Nlinks):\r\n self.keywords = [None for i in range(Nlinks)]\r\n if not selectcmds or (len(self.selectcmds) != Nlinks):\r\n self.selectcmds = [None for i in range(Nlinks)]\r\n\r\n # Format default text for the menu-help command\r\n if not helptext:\r\n helptext = \"Select one of the valid options (\"\r\n for i in range(Nlinks):\r\n if self.keywords[i]:\r\n if self.keywords[i] not in (CMD_NOMATCH, CMD_NOINPUT):\r\n helptext += \"%s, \" % self.keywords[i]\r\n else:\r\n helptext += \"%s, \" % (i + 1)\r\n helptext = helptext.rstrip(\", \") + \")\"\r\n self.helptext = helptext\r\n\r\n # Format text display\r\n string = \"\"\r\n if text:\r\n string += \"%s\\n\" % text\r\n\r\n # format the choices into as many collumns as specified\r\n choices = []\r\n for ilink, link in enumerate(self.links):\r\n choice = \"\"\r\n if self.keywords[ilink]:\r\n if self.keywords[ilink] not in (CMD_NOMATCH, CMD_NOINPUT):\r\n choice += \"{g%s{n\" % self.keywords[ilink]\r\n else:\r\n choice += \"{g %i{n\" % (ilink + 1)\r\n if self.linktexts[ilink]:\r\n choice += \" - %s\" % self.linktexts[ilink]\r\n choices.append(choice)\r\n cols = [[] for i in range(min(len(choices), cols))]\r\n while True:\r\n for i in range(len(cols)):\r\n if not choices:\r\n cols[i].append(\"\")\r\n else:\r\n cols[i].append(choices.pop(0))\r\n if not choices:\r\n break\r\n ftable = utils.format_table(cols)\r\n for row in ftable:\r\n string += \"\\n\" + \"\".join(row)\r\n # store text\r\n self.text = self.separator + \"\\n\" + string.rstrip()",
"def init(self, *, hcubes_list, status_tip, context_menu, activated):\n\n # Add the items to the list\n self.addItems(hcubes_list)\n self.setStatusTip(status_tip)\n\n # Set some properties\n self.setSortingEnabled(True)\n self.setAlternatingRowColors(True)\n self.setSelectionMode(self.ExtendedSelection)\n self.setContextMenuPolicy(QC.Qt.CustomContextMenu)\n\n # Set signal handling\n self.customContextMenuRequested.connect(context_menu)\n self.itemActivated.connect(activated)\n\n # Make sure the items in the list are sorted\n self.sortItems()",
"def initMenus(self):\n menu_items = eval(file_io.load_config(MENU_FILE))\n menubar = self.menuBar()\n\n for menu in menu_items:\n newMenu = menubar.addMenu(menu[0])\n for action in menu[1]:\n if action[\"name\"] == \"sep\":\n newMenu.addSeparator()\n continue\n newAction = QtGui.QAction(action[\"name\"], self)\n newAction.setShortcut(action[\"shortcut\"])\n newAction.setStatusTip(action[\"tip\"])\n newAction.triggered.connect(action[\"cb\"])\n newMenu.addAction(newAction)",
"def createMenus(self):\n\t\tself.fileMenu = self.menuBar().addMenu(\"&File\")\n\t\tself.editMenu = self.menuBar().addMenu(\"&Edit\")\n\t\tself.helpMenu = self.menuBar().addMenu(\"&Help\")",
"def createMenu(self):\r\n self.menuFile = self.menuBar().addMenu(\"&File\")\r\n self.menuFile.addAction(self.actionQuit)\r\n self.menuFile.addAction(self.actionImportFile)\r\n self.menuFile.addAction(self.actionExportFile)\r\n\r\n self.menuContacts = self.menuBar().addMenu(\"&Contact\")\r\n self.menuContacts.addAction(self.actionNewContact)\r\n self.menuContacts.addAction(self.actionModContact)\r\n self.menuContacts.addAction(self.actionDelContact)\r\n self.menuContacts.addAction(self.actionDisplay)\r\n\r\n self.menuHelp = self.menuBar().addMenu(\"&?\")\r\n self.menuHelp.addAction(self.actionAbout)",
"def __initMenus(self):\n self.__menuItems=[\"Login\", \"Play\", \"Setup\", \"Quit\"]\n #self.__menuItems=[\"Login\", \"Play\", \"Single Player\", \"Multiplayer\", \"Setup\", \"Quit\"]",
"def create_menu_item(self,menu):\n sql =(\n \"\"\"INSERT INTO menu (menu_name, menu_price, description, menu_image ) \n VALUES('{}','{}','{}','{}');\n \"\"\".format(menu.menu_name,menu.menu_price, menu.description, menu.menu_image)\n )\n self.cur.execute(sql)\n self.conn.commit()",
"def _add_menu_items(self):\r\n self.mfile.AppendItem(self.mf_close)\r\n self.mfile.AppendItem(self.mf_exit)\r\n\r\n self.medit.AppendItem(self.me_redraw)\r\n self.medit.AppendItem(self.me_pref)\r\n self.medit.AppendSeparator()\r\n self.medit.AppendItem(self.me_run)\r\n\r\n self.mview.AppendItem(self.mv_zoomfit)\r\n self.mview.AppendSeparator()\r\n\r\n self.mopts.AppendItem(self.mo_limits)\r\n self.mopts.AppendItem(self.mo_emails)",
"def rcmenu_item():\n yield keyword(\"menuitem|separator|submenu\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n label = yield quoted\n yield normalspaces()\n vnarg = yield sepBy(named_argument, singlelinespaces())\n return s.Construct(s.RCMENU_ITEM, varname, label, vnarg)",
"def setUp(self) -> None:\n self.first_menu = Menu(2000, 70, 60, 300, ['tomato'])\n self.second_menu = Menu(1900, 60, 65, 290, [])",
"def inventory_menu(con, header, player, inventory_width, screen_width, screen_height):\n # show a menu with each item of the inventory as an option\n if len(player.inventory.items) == 0:\n options = ['Inventory is empty.']\n else:\n options = []\n for item in player.inventory.items:\n if player.equipment.main_hand == item:\n options.append('{0} (oi main hand)'.format(item.name))\n elif player.equipment.off_hand == item:\n options.append('{0} (in off hand)'.format(item.name))\n else:\n options.append(item.name)\n\n menu(con, header, options, inventory_width, screen_width, screen_height)",
"def create_menubar(self):\n # The file menu\n self.file_menu = self.menuBar().addMenu(\"&File\")\n quit_action = create_action(self,\"&Quit\", slot=self.quit,\n shortcut=\"Ctrl+Q\", tip=\"Close the application\")\n add_actions(self.file_menu, (None, quit_action))\n\n # The configuration menu\n self.config_menu = self.menuBar().addMenu(\"&Config\")\n create_option_menu(self,\n self.config_menu,\n \"&Logging level\",\n self.set_loglevel,\n ['Debug','Info','Warning','Error','Critical'])\n \n create_option_menu(self,\n self.config_menu,\n \"&Plot window\",\n self.make_plot_window,\n self.roach_keys+['all'])\n create_option_menu(self,\n self.config_menu,\n \"Power &Scale\",\n self.set_power_scale,\n [\"Linear\",\"Logarithmic\"])\n create_option_menu(self,\n self.config_menu,\n \"&Refresh timer\",\n self.timer_action,\n [\"Start\", \"Stop\"])\n # The help menu\n self.help_menu = self.menuBar().addMenu(\"&Help\")\n about_action = create_action(self,\"&About\",\n shortcut='F1', slot=self.on_about,\n tip='About the demo')\n add_actions(self.help_menu, (about_action,))",
"def menuBarLayout(string, docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", numberOfPopupMenus=bool, useTemplate=\"string\", width=int, dragCallback=\"string\", numberOfChildren=bool, highlightColor=float, annotation=\"string\", enable=bool, preventOverride=bool, popupMenuArray=bool, childArray=bool, exists=bool, numberOfMenus=bool, visible=bool, enableBackground=bool, visibleChangeCommand=\"string\", menuArray=bool, fullPathName=bool, dropCallback=\"string\", menuBarVisible=bool, noBackground=bool, backgroundColor=float, manage=bool, menuIndex=\"string\", isObscured=bool):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list of windows which are children of this window. If the optional 'all' parameter is set to a true value, all such windows will be returned regardless of whether they have any name information.
|
def children(self, all=0):
    s = _xwininfo(self.identifier, "children")
    return self._descendants(s, all and self.find_all or self.find_named)
|
[
"def _get_window_list(self):\n if not self.workspace:\n logger.debug(\"Getting list of windows.\")\n leaves = self.tree.leaves()\n if self.scratch:\n return [\n leave\n for leave in leaves\n if leave.parent.scratchpad_state in [\"changed\", \"fresh\"]\n ]\n else:\n return leaves\n else:\n logger.debug(\n \"Getting list of windows on workspace: {}.\".format(self.workspace)\n )\n workspaces = self.tree.workspaces()\n for workspace in workspaces:\n if workspace.name == self.workspace:\n return workspace.leaves()\n return []",
"def list_children(self):\n return self._list(self.client, children_of_group=self.name)",
"def children(self):\n return Query(self.nodes, False)",
"def __searchHwnds(name: str) -> list:\n hwnds = []\n def foreach_window(hwnd, lParam):\n if name in win32gui.GetWindowText(hwnd):\n hwnds.append(hwnd)\n win32gui.EnumWindows(foreach_window, None)\n return hwnds",
"def allWidgets(self, object):\n\n if not object.isWidgetType():\n return []\n result = []\n if object.isVisible() and object.focusPolicy() != Qt.NoFocus and object.isEnabled():\n if object.inherits('QLineEdit'):\n if not object.isReadOnly():\n result += [object]\n else:\n result += [object]\n for child in object.children():\n result += self.allWidgets(child)\n return result",
"def getChildren(self):\n return self.children",
"def has_seen_all_children(self) -> bool:\n return self._has_seen_all_children",
"def user32_EnumChildWindows(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWndParent\", \"lpEnumFunc\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def widgets(self):\r\n l = []\r\n for i in range(self.count()):\r\n w = self.widget(i)\r\n if w:\r\n l.append(w)\r\n return l",
"def all(self):\n def walk(nodes):\n for node in nodes:\n yield node\n if self.recurse and node.is_container:\n for result in walk(node.children):\n yield result\n return Query(walk(self))",
"def all_children_seen(self):\n self._has_seen_all_children = True",
"def cmd_internal_windows(self):\r\n return [\r\n i.info() for i in self.windowMap.values()\r\n if isinstance(i, window.Internal)\r\n ]",
"def getChildren(self) -> \"SoChildList *\":\n return _coin.SoWWWInline_getChildren(self)",
"def get_children_of_folderish(context):\n brains = api.content.find(\n context=context,\n depth=1,\n sort_on='getObjPositionInParent'\n )\n results = [b.getObject() for b in brains]\n return results",
"def get_stacked_clients(self) -> List[wrappers.Window]:\n result = xlib.get_window_property(\n display=self.dpy,\n window=self.root,\n property=self.atom[\"_NET_CLIENT_LIST_STACKING\"],\n type=self.atom[\"WINDOW\"] or 0,\n )\n return [] if not result else [self.create_window(window_id=r) for r in cast(List[int], result)]",
"def getChildren(self) -> \"SoChildList *\":\n return _coin.SoSpotLightManip_getChildren(self)",
"def getChildren(self) -> \"SoChildList *\":\n return _coin.SoPointLightManip_getChildren(self)",
"def getChildren(self) -> \"SoChildList *\":\n return _coin.SoGroup_getChildren(self)",
"def get_opened(self):\n\n opened = []\n\n def create_opened_callback(hwnd, opened):\n if win32gui.IsWindowVisible(hwnd):\n opened.append(hwnd)\n\n win32gui.EnumWindows(create_opened_callback, opened)\n\n return opened",
"def children(self):\n ret = self._get_attr(\"children\")\n return [ISnapshot(a) for a in ret]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list of windows which are descendants of this window. If the optional 'all' parameter is set to a true value, all such windows will be returned regardless of whether they have any name information.
|
def descendants(self, all=0):
    s = _xwininfo(self.identifier, "tree")
    return self._descendants(s, all and self.find_all or self.find_named)
|
[
"def _get_window_list(self):\n if not self.workspace:\n logger.debug(\"Getting list of windows.\")\n leaves = self.tree.leaves()\n if self.scratch:\n return [\n leave\n for leave in leaves\n if leave.parent.scratchpad_state in [\"changed\", \"fresh\"]\n ]\n else:\n return leaves\n else:\n logger.debug(\n \"Getting list of windows on workspace: {}.\".format(self.workspace)\n )\n workspaces = self.tree.workspaces()\n for workspace in workspaces:\n if workspace.name == self.workspace:\n return workspace.leaves()\n return []",
"def list_descendants(self):\n return self._list(self.client, descendants_of_group=self.name)",
"def __searchHwnds(name: str) -> list:\n hwnds = []\n def foreach_window(hwnd, lParam):\n if name in win32gui.GetWindowText(hwnd):\n hwnds.append(hwnd)\n win32gui.EnumWindows(foreach_window, None)\n return hwnds",
"def descendants(self) -> QuerySet['TreeModel']:\n return self.__class__.objects.exclude(id=self.id).filter(path__ancestor=self.path)",
"def allWidgets(self, object):\n\n if not object.isWidgetType():\n return []\n result = []\n if object.isVisible() and object.focusPolicy() != Qt.NoFocus and object.isEnabled():\n if object.inherits('QLineEdit'):\n if not object.isReadOnly():\n result += [object]\n else:\n result += [object]\n for child in object.children():\n result += self.allWidgets(child)\n return result",
"def cmd_internal_windows(self):\r\n return [\r\n i.info() for i in self.windowMap.values()\r\n if isinstance(i, window.Internal)\r\n ]",
"def get_descendants(self) -> List['Node']:\n descendants = self.children[:]\n for child in self.children:\n descendants += child.get_descendants()\n return descendants",
"def set_all_windows(self, action=None):\n for mw in self.mw[:self._a] + self.rw[:len(self.rw_inds)]:\n for i in range(len(self.bin_actions)):\n mw.bin_actions[i].setChecked(self.bin_actions[i].isChecked())\n mw.set_bins()\n for i in range(len(self.fit_methods)):\n mw.fit_methods[i].setChecked(self.fit_methods[i].isChecked())",
"def showAllAttendants(self):\n data_list = self.__logic.showAllAttendants()\n self.__data_printer.printAllEmps(data_list)",
"def widgets(self):\r\n l = []\r\n for i in range(self.count()):\r\n w = self.widget(i)\r\n if w:\r\n l.append(w)\r\n return l",
"def by(self, **criteria):\n from .base_application import WindowSpecification\n # default to non top level windows because we are usually\n # looking for a control\n if 'top_level_only' not in criteria:\n criteria['top_level_only'] = False\n\n criteria['backend'] = self.backend.name\n criteria['parent'] = self.element_info\n child_specification = WindowSpecification(criteria)\n\n return child_specification",
"def all(self):\n def walk(nodes):\n for node in nodes:\n yield node\n if self.recurse and node.is_container:\n for result in walk(node.children):\n yield result\n return Query(walk(self))",
"def list(self, sleep=1):\n\n if sleep > 0:\n time.sleep(sleep)\n\n if self.useUiAutomator:\n raise Exception(\"Not implemented yet: listing windows with UiAutomator\")\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((VIEW_SERVER_HOST, self.localPort))\n except socket.error, ex:\n raise RuntimeError(\"ERROR: Connecting to %s:%d: %s\" % (VIEW_SERVER_HOST, self.localPort, ex))\n s.send('list\\r\\n')\n received = \"\"\n doneRE = re.compile(\"DONE\")\n while True:\n received += s.recv(1024)\n if doneRE.search(received[-7:]):\n break\n s.close()\n\n self.windows = {}\n for line in received.split('\\n'):\n if not line:\n break\n if doneRE.search(line):\n break\n values = line.split()\n if len(values) > 1:\n package = values[1]\n else:\n package = \"UNKNOWN\"\n if len(values) > 0:\n wid = values[0]\n else:\n wid = '00000000'\n self.windows[int('0x' + wid, 16)] = package\n return self.windows",
"def archived_descendants(self):\n return self.get_descendants().filter(is_archived=True)",
"def dispatchAllWindowEvents(cls):\n wins = pyglet.window.get_platform().get_default_display().get_windows()\n for win in wins: win.dispatch_events()",
"def all_children_seen(self):\n self._has_seen_all_children = True",
"def find_viewers():\n stack = QtGui.QApplication.topLevelWidgets()\n viewers = []\n while stack:\n widget = stack.pop()\n if widget.objectName().startswith('Viewer.'):\n viewers.append(widget)\n stack.extend(c for c in widget.children() if c.isWidgetType())\n return viewers",
"def get_opened(self):\n\n opened = []\n\n def create_opened_callback(hwnd, opened):\n if win32gui.IsWindowVisible(hwnd):\n opened.append(hwnd)\n\n win32gui.EnumWindows(create_opened_callback, opened)\n\n return opened",
"def __groupByWindow(self):\n windows = self.__mw.mainWindows()\n \n self.__isRefreshing = True\n \n winCount = 0\n for mainWin in windows:\n winCount += 1\n winItem = self.__createEmptyItem()\n winItem.setText(0, self.tr(\"Window {0}\").format(winCount))\n winItem.setToolTip(0, self.tr(\"Double click to switch\"))\n if mainWin == self.__mw:\n font = winItem.font(0)\n font.setBold(True)\n winItem.setFont(0, font)\n winItem.setData(0, TabManagerWidget.WebWindowRole, mainWin)\n \n for browser in mainWin.tabWidget().browsers():\n if self.__page == browser.page():\n self.__page = None\n continue\n \n tabItem = self.__createEmptyItem(winItem)\n if browser == mainWin.tabWidget().currentBrowser():\n font = tabItem.font(0)\n font.setBold(True)\n tabItem.setFont(0, font)\n if not browser.isLoading():\n tabItem.setIcon(0, browser.icon())\n else:\n tabItem.setIcon(0, UI.PixmapCache.getIcon(\"loading.png\"))\n tabItem.setText(0, browser.title())\n tabItem.setToolTip(0, browser.title())\n \n tabItem.setData(0, TabManagerWidget.WebBrowserRole, browser)\n tabItem.setData(0, TabManagerWidget.WebWindowRole, mainWin)\n \n self.__makeWebBrowserViewConnections(browser)",
"def select_all(self):\n\n return self.select_atoms({})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return windows using the given 'callable' (returning a true or a false value when invoked with a window name) for descendants of this window.
|
def find(self, callable):
    s = _xwininfo(self.identifier, "tree")
    return self._descendants(s, callable)
|
[
"def _find_window(self, predicate, timeout = 10.0):\n window_handle = None\n end_time = time.time() + timeout\n while window_handle is None and end_time > time.time():\n for handle in self.driver.window_handles:\n if predicate(handle):\n window_handle = handle\n break\n\n return window_handle",
"def __searchHwnds(name: str) -> list:\n hwnds = []\n def foreach_window(hwnd, lParam):\n if name in win32gui.GetWindowText(hwnd):\n hwnds.append(hwnd)\n win32gui.EnumWindows(foreach_window, None)\n return hwnds",
"def user32_EnumChildWindows(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWndParent\", \"lpEnumFunc\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def by(self, **criteria):\n from .base_application import WindowSpecification\n # default to non top level windows because we are usually\n # looking for a control\n if 'top_level_only' not in criteria:\n criteria['top_level_only'] = False\n\n criteria['backend'] = self.backend.name\n criteria['parent'] = self.element_info\n child_specification = WindowSpecification(criteria)\n\n return child_specification",
"def get_window_by_title(title, list=None):\n\n result = [x for x in WidgetTree(list) if isinstance(x, Gtk.Window) and\n x.get_title() == title]\n if result:\n return result[0]\n else:\n return result",
"def _find_popup(self, handles, timeout = 10.0):\n return self._find_window(lambda handle: handle not in handles,\n timeout = timeout)",
"def _makewindows(self, indices, window):\n div = divmod(window, 2)\n before = div[0]\n after = div[0] + div[1]\n index = asarray(self.index)\n indices = asarray(indices)\n if where(index == max(indices))[0][0] + after > len(index):\n raise ValueError(\"Maximum requested index %g, with window %g, exceeds length %g\"\n % (max(indices), window, len(index)))\n if where(index == min(indices))[0][0] - before < 0:\n raise ValueError(\"Minimum requested index %g, with window %g, is less than 0\"\n % (min(indices), window))\n masks = [arange(where(index == i)[0][0]-before, where(index == i)[0][0]+after, dtype='int') for i in indices]\n return masks",
"def create_windows(self):\n\n # implemented in sub classes",
"def user32_FindWindowEx(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hwndParent\", \"hwndChildAfter\", \"lpszClass\", \"lpszWindow\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def test_find_top_win_by_class_name_and_title(self):\n # Since the top_level_only is True by default\n # we don't specify it as a criteria argument\n self.dlg.wait('ready')\n caption = 'WPF Sample Application'\n wins = self.app.windows(class_name='Window', name=caption)\n\n # Verify the number of found wrappers\n self.assertEqual(len(wins), 1)\n\n # Verify the caption of the found wrapper\n self.assertEqual(wins[0].texts()[0], caption)",
"def get_opened(self):\n\n opened = []\n\n def create_opened_callback(hwnd, opened):\n if win32gui.IsWindowVisible(hwnd):\n opened.append(hwnd)\n\n win32gui.EnumWindows(create_opened_callback, opened)\n\n return opened",
"def namedWindow(winname, flags=...) -> None:\n ...",
"def get_window_by_prefix(prefix, list=None):\n\n result = [x for x in WidgetTree(list) if\n isinstance(x, Gtk.Window) and\n x.get_title() and\n x.get_title().startswith(prefix)]\n if result:\n return result[0]\n else:\n return result",
"def get_stacked_clients(self) -> List[wrappers.Window]:\n result = xlib.get_window_property(\n display=self.dpy,\n window=self.root,\n property=self.atom[\"_NET_CLIENT_LIST_STACKING\"],\n type=self.atom[\"WINDOW\"] or 0,\n )\n return [] if not result else [self.create_window(window_id=r) for r in cast(List[int], result)]",
"def window_generator(im, window_centers, width):\n for window_center in window_centers:\n window = get_window(im, window_center, width)\n yield window\n \n return",
"def _get_window_list(self):\n if not self.workspace:\n logger.debug(\"Getting list of windows.\")\n leaves = self.tree.leaves()\n if self.scratch:\n return [\n leave\n for leave in leaves\n if leave.parent.scratchpad_state in [\"changed\", \"fresh\"]\n ]\n else:\n return leaves\n else:\n logger.debug(\n \"Getting list of windows on workspace: {}.\".format(self.workspace)\n )\n workspaces = self.tree.workspaces()\n for workspace in workspaces:\n if workspace.name == self.workspace:\n return workspace.leaves()\n return []",
"def _find_window_by_title(self, title, timeout = 10.0):\n def predicate(handle):\n self.driver.switch_to.window(handle)\n return self.driver.title == title\n\n return self._find_window(predicate, timeout = timeout)",
"def test_windows() -> None:\n assert windows([3, 4, 6, 2, 3], 2) == [[3, 4], [4, 6], [6, 2], [2, 3]]\n assert windows(['a', 1, 6.0, False], 3) == [['a', 1, 6.0], [1, 6.0, False]]\n assert windows([], 1) == []",
"def user32_ShowOwnedPopups(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\", \"fShow\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_df_subwindows_from_df_windows(df_windows, n_subwindows=20):\n\n # get df\n rows_iterator = [r for I,r in df_windows.iterrows()]\n df_subwindows = pd.concat(map(lambda r: get_df_subwindows_window_r(r, n_subwindows=n_subwindows), rows_iterator))\n\n # get only those where the start!=end\n df_subwindows = df_subwindows[df_subwindows.start!=df_subwindows.end]\n\n # check\n if any(df_subwindows.end<=df_subwindows.start): \n print(df_subwindows)\n print(df_subwindows[df_subwindows.end<=df_subwindows.start])\n raise ValueError(\"df_subwindows: start should be < end\")\n\n return df_subwindows"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|