query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (sequence, lengths 19-20) | metadata (dict) |
---|---|---|---|
Generate a mapping from water-measurement column names to the values of the given CSV row (see the usage sketch after this row). | def get_water_value_map(row, column_names_map):
    column_values_map = column_names_map.copy()
    row_length = len(row)
    empty = True
    for key, index in column_names_map.items():
        # Check if a non-empty value exists for the given index.
        if -1 < index < row_length:
            value = row[index].strip()
            if value:
                column_values_map[key] = value
                empty = False
                continue
        # Otherwise 'NULL' is inserted into the db.
        column_values_map[key] = 'NULL'
    return None if empty else column_values_map | [
"def create_deft_table_csv_mappings():\n mappings = list()\n mappings.append(CsvColumnMapping(columnName=\"rownumber\", cslDataType=\"int\", ordinal=0))\n mappings.append(CsvColumnMapping(columnName=\"rowguid\", cslDataType=\"string\", ordinal=1))\n mappings.append(CsvColumnMapping(columnName=\"xdouble\", cslDataType=\"real\", ordinal=2))\n mappings.append(CsvColumnMapping(columnName=\"xfloat\", cslDataType=\"real\", ordinal=3))\n mappings.append(CsvColumnMapping(columnName=\"xbool\", cslDataType=\"bool\", ordinal=4))\n mappings.append(CsvColumnMapping(columnName=\"xint16\", cslDataType=\"int\", ordinal=5))\n mappings.append(CsvColumnMapping(columnName=\"xint32\", cslDataType=\"int\", ordinal=6))\n mappings.append(CsvColumnMapping(columnName=\"xint64\", cslDataType=\"long\", ordinal=7))\n mappings.append(CsvColumnMapping(columnName=\"xuint8\", cslDataType=\"long\", ordinal=8))\n mappings.append(CsvColumnMapping(columnName=\"xuint16\", cslDataType=\"long\", ordinal=9))\n mappings.append(CsvColumnMapping(columnName=\"xuint32\", cslDataType=\"long\", ordinal=10))\n mappings.append(CsvColumnMapping(columnName=\"xuint64\", cslDataType=\"long\", ordinal=11))\n mappings.append(CsvColumnMapping(columnName=\"xdate\", cslDataType=\"datetime\", ordinal=12))\n mappings.append(CsvColumnMapping(columnName=\"xsmalltext\", cslDataType=\"string\", ordinal=13))\n mappings.append(CsvColumnMapping(columnName=\"xtext\", cslDataType=\"string\", ordinal=14))\n mappings.append(CsvColumnMapping(columnName=\"xnumberAsText\", cslDataType=\"string\", ordinal=15))\n mappings.append(CsvColumnMapping(columnName=\"xtime\", cslDataType=\"timespan\", ordinal=16))\n mappings.append(CsvColumnMapping(columnName=\"xtextWithNulls\", cslDataType=\"string\", ordinal=17))\n mappings.append(CsvColumnMapping(columnName=\"xdynamicWithNulls\", cslDataType=\"dynamic\", ordinal=18))\n return mappings",
"def get_row_data(row):\n\n # Get Values #\n name = row[0]\n full_name = row[1]\n freq = row[2]\n intensity = row[4]\n line_list = row[6]\n\n # Determine Correct Frequency #\n if str(freq) == \"--\":\n if str(row[3]) == \"--\":\n freq = 0\n else:\n freq = float(str(row[3])) # what is difference? meas freq-ghz vs freq-ghz\n freq = float(freq) # Cast to Float\n freq *= 1000 # Convert to MHz (TEMPORARY)\n\n # Determine Correct Intensity #\n if str(intensity) == \"--\":\n intensity = row[5]\n\n if isnan(intensity):\n intensity = None\n else:\n intensity = float(intensity) # Cast to Float\n if intensity < 0:\n intensity = abs(intensity) ** intensity # |x|^x for actual value\n\n # Return Values\n return name, full_name, freq, intensity, line_list",
"def tsvRowToDict(row):\n return {col: getattr(row, col) for col in row._columns_}",
"def _properties_from_csv_row(row, header, ignored_columns):\n props = {}\n for h, prop in enumerate(header):\n # Handle a strange edge case where the length of the row is longer than the length of the header.\n # We do this to prevent an out of range error.\n x = h\n if x > len(row) - 1:\n x = len(row) - 1\n if row[x] == '' or prop in ignored_columns:\n continue\n else:\n try:\n # We use literal_eval() here to de-stringify numbers, lists and objects in the CSV data\n p = literal_eval(row[x])\n props[prop] = p\n except (SyntaxError, ValueError) as e:\n props[prop] = row[x]\n return props",
"def make_dict(row):\n return dict((key[0], value) for key, value in zip(colnames, row))",
"def create_waves_dict(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n waves_dict = {row[\"Date\"]: row[\"Wave Height\"] for row in reader}\n return waves_dict",
"def parse_csv_row(self, row):\n\n for key in self.field_map:\n if self.field_map[key] is not None:\n if key == 'marking':\n self.obstacle_data[key] = self.get_marking_value(row[self.field_map[key]].strip())\n elif key == 'lighting':\n self.obstacle_data[key] = self.get_lighting_value(row[self.field_map[key]].strip())\n elif key == 'obst_type':\n self.obstacle_data['obst_type_id'] = self.get_obstacle_type_id(row[self.field_map[key]].strip())\n else:\n self.obstacle_data[key] = row[self.field_map[key]].strip()",
"def get_m_to_me_metabolite_mapping():\n f = pandas.read_csv(fixpath(\"m_to_me_mets.csv\"), index_col=0)[\"me_name\"]\n return f.dropna().to_dict()",
"def map_csv_fields(self):\n etod_csv_fields = {\n 'ctry_id': None,\n 'obst_identifier': None,\n 'obst_name': None,\n 'lon_src': None,\n 'lat_src': None,\n 'agl': None,\n 'amsl': None,\n 'vert_uom': None,\n 'hor_acc': None,\n 'hor_acc_uom': None,\n 'vert_acc': None,\n 'vert_acc_uom': None,\n 'obst_type': None,\n 'lighting': None,\n 'marking': None,\n 'is_group': None,\n }\n\n for field in etod_csv_fields:\n try:\n etod_csv_fields[field] = etod_map[self.ctry_short_name]['fields'][field]\n except KeyError:\n etod_csv_fields[field] = None\n\n self.field_map = etod_csv_fields",
"def find_mapping_columns(self, srcrow, sinkrow = None):\n\n def find_col_in_col_lists(id, col_list):\n cols = [col for col in col_list if col.id == id]\n assert len(cols) <= 1, 'non unique sink ids (%s) in template' % id\n assert len(cols) == 1, 'id (%s) specified does not exist' % id\n return cols[0]\n\n INTEGER = \"[0-9]+\"\n SEQUENCE_COMMA = \"{number}(,{number})*\".format(number=INTEGER)\n\n # Example \"{1,3},2,{3}\"\n if isinstance(self.mappers, str):\n s = re.split(\"({.*?})\", self.mappers) # *? means minimal catch for regular expression. {1},2,{3} will be matched as {1} not {1},2,{3}\n # [{1,3},2,{3}]\n col_data = []\n col_list = []\n for substring in s:\n if substring is None or substring == \"\":\n continue\n match = re.search(SEQUENCE_COMMA, substring)\n if match:\n items = match.group().split(\",\")\n for id in items:\n # {1,3} or {2}\n if \"{\" in substring and \"}\" in substring:\n # The number is the id, compare it with the sink row\n # Find the column, and use the name as key\n try:\n sink_col = find_col_in_col_lists(id, sinkrow)\n col_data.append(sinkrow[sink_col.col_name])\n col_list.append(sink_col)\n except KeyError as e:\n user_error_log.log_mapping_error(\"The sink column id in the mapper string has to be smaller\"\n \"than the id of this column\")\n raise e\n else:\n src_col = find_col_in_col_lists(id, srcrow)\n col_list.append(src_col)\n col_data.append(srcrow[src_col.col_name])\n\n return col_list, col_data\n raise Exception(\"Unexpected error\")",
"def _map(event_name, data):\n pk = _pk(data)\n for (column, value) in data.items():\n yield (event_name, pk, column, value)",
"def TMY_CSV_to_solar_data(filename):\n if not os.path.isfile(filename):\n raise FileNotFoundError(filename + \" does not exist.\")\n wfd = defaultdict(list)\n with open(filename) as file_in:\n info = []\n for i in range(2):\n info.append(file_in.readline())\n info[i] = info[i].split(\",\")\n if \"Time Zone\" not in info[0]:\n raise ValueError(\"`Time Zone` field not found in solar resource file.\")\n latitude = info[1][info[0].index(\"Latitude\")]\n longitude = info[1][info[0].index(\"Longitude\")]\n tz = info[1][info[0].index(\"Time Zone\")]\n elev = info[1][info[0].index(\"Elevation\")]\n reader = csv.DictReader(file_in)\n for row in reader:\n for col, dat in row.items():\n if len(col) > 0:\n wfd[col].append(float(dat))\n\n weather = dict()\n weather['tz'] = float(tz)\n weather['elev'] = float(elev)\n weather['lat'] = float(latitude)\n weather['lon'] = float(longitude)\n weather['year'] = wfd.pop('Year')\n weather['month'] = wfd.pop('Month')\n weather['day'] = wfd.pop('Day')\n weather['hour'] = wfd.pop('Hour')\n weather['minute'] = wfd.pop('Minute')\n weather['dn'] = wfd.pop('DNI')\n weather['df'] = wfd.pop('DHI')\n weather['gh'] = wfd.pop('GHI')\n weather['wspd'] = wfd.pop('Wind Speed')\n weather['tdry'] = wfd.pop('Temperature')\n\n return weather",
"def _fast_map_row(row):\n return {'row': row}",
"def make_row_map(file_path, key_field, field_map=None, transforms=None, \\\n file_encoding=None):\n\n with open(file_path, encoding=file_encoding) as file:\n # preprocess transforms\n if transforms:\n _transforms = {}\n for tf_field, tf in transforms.items():\n _type = type(tf).__name__\n if _type not in ['str', 'function']:\n raise ValueError('Invalid transform')\n _transforms[tf_field] = {\n 'transform': tf,\n 'type': _type\n }\n\n # get fields from csv\n fields_reader = csv.reader(file)\n fields = next(fields_reader)\n\n # make sure we aren't missing any field names\n first_row = next(fields_reader)\n if len(fields) != len(first_row):\n raise ValueError('Header has a different number of columns than data')\n\n # apply field map\n if field_map:\n # TODO use a case insensitive dictionary for field map\n fields = [field_map.get(field.lower()) or field for field in fields]\n key_field = field_map.get(key_field) or key_field\n\n # lowercase\n fields = [field.lower() for field in fields]\n\n # handle spaces\n fields = [field.replace(' ', '_') for field in fields]\n\n # use namedtuple for rows\n fields_joined = ' '.join(fields)\n Row = namedtuple('Row', fields_joined)\n\n # make map\n row_map = {}\n reader = csv.DictReader(file, fieldnames=fields)\n\n for i, row in enumerate(reader):\n key = row[key_field]\n\n # apply transforms\n if transforms:\n for tf_field, tf_map in _transforms.items():\n tf = tf_map['transform']\n tf_type = tf_map['type']\n source_val = row[tf_field]\n if tf_type == 'str':\n val = getattr(source_val, tf)()\n else:\n val = tf(source_val)\n row[tf_field] = val\n\n # row_map[key] = row\n # str_row = {key: str(val) for key, val in row.items()}\n row_map[key] = Row(**row)\n # from pprint import pprint\n # pprint(str_row)\n # row_map[key] = Row(**str_row)\n\n return row_map",
"def list_water_temps(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n temp_list = [temp[\"Water Temp\"] for temp in reader]\n return temp_list",
"def create_dict_from_file(filename, delimeters, first_char, column_names):\n\n # This opens the\n measurement_output = open('measurement_output.txt', \"w\", encoding=\"utf8\")\n # This creates and initializes a list to serve as a dictionary container outside of the for-loop.\n measurements_file_container = {}\n\n # This opens the file and then splits it (preserving the commas because of the landfall count requirement).\n if not filename.endswith('.txt'):\n print('Input File Must Be a .txt File')\n return None\n elif delimeters != '{}=|{}=|{}='.format(column_names[0], column_names[1], column_names[2]):\n print('Please Check Syntax for Delimeters and colunm_names.')\n return None\n else:\n with open(filename, 'r') as infile:\n for line in infile:\n line = line.strip()\n # This checks to see if line begins with a numeric character; if so, it is a header for a new measurement.\n if line[0].isnumeric():\n measurement_current_line = line.split()\n # This initializes a new measurement dictionary with the 3 items in column_names\n key = measurement_current_line[0]\n new_measurement_dictionary = {\n column_names[0]: '0',\n column_names[1]: '0',\n column_names[2]: '0',\n }\n #print(measurement_current_line)\n # this determines if a line starts with 'X', splits it at the X =,Y =,Z = indicators\n # to spit out a list containing only the 3 values and then updates the corresponding\n # value in the dictionary\n if line[0] == first_char:\n measurement_current_line = re.split(delimeters, line.strip(' '))\n if len(measurement_current_line) == 4:\n new_measurement_dictionary[column_names[0]] = float(measurement_current_line[1].strip())\n new_measurement_dictionary[column_names[1]] = float(measurement_current_line[2].strip())\n new_measurement_dictionary[column_names[2]] = float(measurement_current_line[3].strip())\n measurements_file_container[key] = new_measurement_dictionary\n # this stops the processing when the end of data key '$$EOE' is reached.\n elif line == '$$EOE':\n break\n\n\n return(measurements_file_container)",
"def gen_dict():\n lines = [line for line in csv.reader(open(__ppath__ + \"/data/occupations.csv\"))] # uses a csv.reader to parse the file, converts the generic iterable to a list\n lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and \"Total\" row, re-expresses as a list of tuples to enable dictionary conversion\n lines.append((\"Unemployed\",0.2)) # accounts for missing 0.2% of jobs\n return dict(lines) # converts to dictionary",
"def columnar(row_table: list[dict[str, str]]) -> dict[str, list[str]]:\n result: dict[str, list[str]] = {}\n \n first_row: dict[str, str] = row_table[0]\n for column in first_row:\n result[column] = column_values(row_table, column)\n \n return result",
"def row_to_dict(row, field_names):\n dict_row = {}\n for value, field_name in zip(row, field_names):\n if value and str(value).lower() == 'nan':\n value = None\n dict_row[field_name] = value\n return dict_row"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
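A minimal usage sketch for `get_water_value_map` from the row above; the column-index map and row values are hypothetical, chosen only to show the empty-field and out-of-range behaviour:

```python
# Hypothetical index map: measurement column name -> its position in the CSV row.
# An index of -1 marks a column that is missing from this station's file.
column_names_map = {'level': 1, 'flow': 2, 'temperature': -1}

# One semicolon-split CSV row; the 'flow' field (index 2) is empty.
row = ['01.01.2020', '153', '', '7.4']

values = get_water_value_map(row, column_names_map)
print(values)
# {'level': '153', 'flow': 'NULL', 'temperature': 'NULL'}
# If every mapped field were empty, the function would return None instead.
```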
Populate the water measurements table for the selected `archive`, `directory` and `station` (see the usage sketch after this row). | def populate_water_measurements(cursor, archive, directory, station):
    csv_path = get_data_path(
        'water',
        'raw',
        archive,
        directory,
        f'{station}.csv'
    )
    with open(csv_path, 'r', encoding='utf-8') as file:
        reader = csv.reader(file, delimiter=';')
        header = next(reader)
        column_names_map = get_water_index_map(archive, header)
        if not column_names_map:
            return False
        water_body = get_water_definitions(archive)['body']
        for row in reader:
            column_values_map = get_water_value_map(row, column_names_map)
            if column_values_map:
                date = datetime.strptime(row[0], '%d.%m.%Y').date()
                data_columns = ', '.join(column_values_map.keys())
                data_values = ', '.join(column_values_map.values())
                cursor.execute(f'''INSERT INTO {water_body}_measurements (station_id, date, {data_columns})
                                   VALUES ({station}, '{str(date)}', {data_values})''')
    return True | [
"def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')\n aquifer_count = cursor.fetchone()[0]\n\n if watercourse_count and aquifer_count:\n print('Water tables already populated!')\n return\n\n station_data = get_station_data()\n\n for archive in metadata.keys():\n print(f'{archive}-water:'.upper())\n water_body = get_water_definitions(archive)['body']\n\n # 1. Populate watercourses/aquifers:\n stations = {}\n for water_body_name in metadata[archive].keys():\n print(f'\\tPopulating {water_body}: \"{water_body_name}\"')\n cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)\n VALUES (0, '{water_body_name}')''')\n water_body_id = cursor.lastrowid\n\n # 2. Populate watercourse_stations/aquifer_stations:\n for station_id in metadata[archive][water_body_name]['stations']:\n station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])\n\n if station_id in stations:\n # Prefer watercourses/aquifer with more stations\n current_len = len(metadata[archive][water_body_name]['stations'])\n previous_len = len(metadata[archive][stations[station_id]]['stations'])\n\n if current_len < previous_len:\n print(f'\\t\\tStation already exists: {station_id} - \"{station_name}\" (\"{water_body_name}\")')\n continue\n else:\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station: {station_id} - \"{station_name}\" from \"{stations[station_id]}\")')\n\n stations[station_id] = water_body_name\n print(f'\\t\\tPopulating station: {station_id} - \"{station_name}\"')\n\n # Insert station location if station data exists.\n location_id = 0\n station_row = station_data.query(f'ŠIFRA == \"{station_id}\"')\n if not station_row.empty:\n index = station_row.index[0]\n lat = station_row.at[index, 'LAT']\n lng = station_row.at[index, 'LON']\n if not np.isnan(lat) and not np.isnan(lng):\n name = f\"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})\"\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{name}', {lat}, {lng})''')\n location_id = cursor.lastrowid\n\n # Insert station.\n cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)\n VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')\n\n # 3. Populate watercourse_measurements/aquifer_measurements:\n if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],\n station_id):\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station with useless data: {station_id} - \"{station_name}\"')\n\n # Remove empty watercourses/aquifers.\n cursor.execute(f'''SELECT w.id, w.name\n FROM {water_body}s w\n WHERE NOT EXISTS (\n SELECT s.id \n FROM {water_body}_stations s \n WHERE w.id = s.{water_body}_id\n )''')\n\n for row in cursor.fetchall():\n cursor.execute(f'''DELETE \n FROM {water_body}s\n WHERE id = {row[0]}''')\n print(f'\\tRemoved empty {water_body}: \"{row[1]}\"')",
"def update_weather_for_all_stations():\n\n weather.get_metars(airport_render_config.keys(), logger=LOGGER)",
"def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n 
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n \"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')",
"def main():\n pollution_data = get_pollution_data()\n date = pollution_data[0]['date']\n stations = select('stations', ['id_station', 'name'], where=f\"date = '{date}'\")\n insert('pollution', join_name_with_id(stations, pollution_data), columns=['id_station', 'measurement_date',\n 'measurement_time', 'no2', 'o3', 'pm25',\n 'so2', 'pm10', 'co', 'c6h6'])",
"def __init__(self):\n self.stations_df = pd.read_csv(WeatherData.data_path + 'all_tx_stations.csv')\n self.weather_df = pd.read_csv(WeatherData.data_path + 'houston_weather.csv')\\\n .merge(self.stations_df, on='station_id', how='inner')\\\n .fillna({'time': 0})\n\n # Convert date to datetime type\n self.weather_df.loc[self.weather_df.time == 2400, 'time'] = 2359\n self.weather_df['date'] = pd.to_datetime(\n self.weather_df.date.astype(str) + ' ' + self.weather_df.time.map(lambda x: f\"{int(x):04d}\"),\n format=\"%Y%m%d %H%M\"\n )\n self.weather_df = self.weather_df.drop(columns='time')\n\n self.weather_df.value = pd.to_numeric(self.weather_df.value)",
"def extract_archive_data():\n extract_from_db_info = [\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_customer',\n 'dest_table': 'raw_customer_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_customer_archive.sql'\n },\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_product',\n 'dest_table': 'raw_product_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_product_archive.sql'\n },\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_sales',\n 'dest_table': 'raw_sales_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_sales_archive.sql'\n }\n ]\n\n for extract_info in extract_from_db_info:\n try:\n extract_data_from_db(extract_info['source_db'], extract_info['dest_db'], extract_info['dest_table'], extract_info['sql_select'], extract_info['sql_insert'])\n except Exception as e:\n print(\"An error occurred: \", e)\n else:\n print(\"Successfully inserted records in {} table of {} database from {} table of {} database.\".format(extract_info['dest_table'], extract_info['dest_db'], extract_info['source_table'], extract_info['source_db']))",
"def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations",
"def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)",
"def load_isd_station(data_dir,station_number,start_year=1950,end_year=datetime.now().year):\n isd_station = {'temp':pd.Series(),'dpt':pd.Series(),'mslp':pd.Series(),'wdir':pd.Series(),'ws':pd.Series(),\n 'sky':pd.Series(),'precip_1':pd.Series(),'precip_6':pd.Series()}\n for year in range(start_year,end_year + 1):\n filename = '{0}-99999-{1}'.format(station_number,year)\n try:\n dataframe = pd.read_csv(data_dir + filename,delim_whitespace=True,header=None,na_values=-9999,\n parse_dates=[[0,1,2,3]],index_col=0)\n except FileNotFoundError:\n continue\n isd_station['temp'] = isd_station['temp'].append(dataframe[4] / 10.0)\n isd_station['dpt'] = isd_station['dpt'].append(dataframe[5] / 10.0)\n isd_station['mslp'] = isd_station['mslp'].append(dataframe[6] / 10.0)\n isd_station['wdir'] = isd_station['wdir'].append(dataframe[7])\n isd_station['ws'] = isd_station['ws'].append(dataframe[8] / 10.0)\n isd_station['sky'] = isd_station['sky'].append(dataframe[9])\n precip_1_without_trace = dataframe[10].copy()\n precip_1_without_trace[precip_1_without_trace == -1.0] = 0.0\n precip_6_without_trace = dataframe[11].copy()\n precip_6_without_trace[precip_6_without_trace == -1.0] = 0.0\n isd_station['precip_1'] = isd_station['precip_1'].append(precip_1_without_trace / (10.0 * 1000))\n isd_station['precip_6'] = isd_station['precip_6'].append(precip_6_without_trace / (10.0 * 1000))\n\n return isd_station",
"def create_station_data():\n\n file = \"station_names_v4.json\"\n\n for line in lines:\n get_stops_for_line(line)\n\n with open(file, \"w\") as destination_file:\n json.dump(stations,destination_file)",
"def insertMeasurement(self, temp, salinity, par, ph, flow):\n ## Create a connection to the database\n ##\n conn = sqlite3.connect(DATA_FILE)\n cur = conn.cursor()\n \n ## Create the table if it does not exist\n ##\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS measurements\n (timestamp TEXT NOT NULL, temperature TEXT NOT NULL, \n salinity TEXT NOT NULL, par TEXT NOT NULL, ph TEXT NOT NULL, \n flow INTEGER NOT NULL)\n \"\"\")\n \n conn.commit()\n \n ## Insert the measurement data into the database\n ##\n cur.execute(\"\"\"INSERT INTO measurements VALUES (DATETIME('now'),?,?,?,?,?)\"\"\", (temp, salinity, par, ph, flow))\n \n \n ## Since this is used in an embedded environment, the file size needs to be kept small.\n ##\n cur.execute(\"\"\"DELETE FROM measurements WHERE timestamp IN \n (SELECT timestamp FROM measurements ORDER BY timestamp ASC LIMIT \n (SELECT (CASE WHEN COUNT(*) - ? < 0 \n THEN 0 \n ELSE COUNT(*) - ? \n END) FROM measurements))\n \"\"\", (MAX_DATABASE_RECORDS, MAX_DATABASE_RECORDS))\n conn.commit()\n cur.execute(\"\"\"VACUUM\"\"\")\n \n ## Close the connection\n ##\n conn.close()\n log.msg(\"Measurements saved in the database\")\n \n ## Clean up the logins table\n ##\n conn = sqlite3.connect(CONFIG_FILE)\n cur = conn.cursor()\n\n cur.execute(\"\"\"DELETE FROM Logins WHERE timestamp < DATETIME('now','-1 day')\"\"\")\n conn.commit()\n conn.close()\n log.msg(\"Logins table cleaned\")\n \n ## Clean up the downloads directory\n ##\n log.msg(\"Downloads directory cleaned\")\n os.chdir(DOWNLOAD_DIRECTORY)\n fileList = os.listdir(os.getcwd())\n for fn in fileList: \n os.remove(os.path.join(os.getcwd(), fn))",
"def _prepare_dataset(self):\n loads = pd.concat(ul.total_experiment_load())\n return [ul.add_temperatures(loads, period) \n for period in ul.experiment_periods()]",
"def add_stats_to_stations():\n stns = pd.read_feather(station_store)\n #ct=0\n stns['LENGTH']=np.nan\n stns['MISSING']=np.nan\n for index, s in stns.iterrows():\n stnid=s['STAID']\n FILE='./data/eca_blend_rr/{}.txt'.format(stnid)\n df=read_rain_from_csv(FILE)\n missing=df['Rainfall_mm'].isnull().sum()/df['Rainfall_mm'].shape[0]\n print(FILE, missing)\n length=df['Rainfall_mm'].shape[0]/360. # convert to years.\n stns.iloc[index, stns.columns.get_loc('LENGTH')] = length\n stns.iloc[index, stns.columns.get_loc('MISSING')] = missing\n stns.iloc[index, stns.columns.get_loc('TXT')] = s['TXT'] + ' ({:.0f}y with m={:.3%})'.format(length, missing)\n #ct+=1\n\n stns.to_feather(station_store)",
"def _setData(self):\n\n if not self.stationId:\n return\n \"\"\" \n # get the ressource url and adjust lat and lon from data portal\n query = sparqls.stationResource(self.stationId)\n key, val = RunSparql(query, 'array').run()\n if val: \n self.url = val[0][0]\n self.lat = float(val[0][2])\n self.lon = float(val[0][3])\n \"\"\"\n\n # it is possible, that a station id has multiple URI\n # ask for all URI\n query = sparqls.stationData(self.uri, 'all')\n data = RunSparql(query, 'pandas').run()\n\n if not data.empty:\n self._data = data\n else:\n self._data = 'no data available'\n\n # check if data is available and extract the 'unique' data products\n if isinstance(self._data, pd.DataFrame):\n p = self._data['specLabel'].unique()\n self._products = pd.DataFrame(p)\n\n # replace samplingheight=None with empty string\n self._data.samplingheight.replace(to_replace=[None], value=\"\", inplace=True)\n else:\n self._products = 'no data available'",
"def load_waterways(sources):\n util.run_sql(f\"DROP TABLE IF EXISTS {WATERWAYS_TABLE_NAME}\", dbname=DBNAME)\n util.run_sql(\n f\"\"\"\n CREATE TABLE {WATERWAYS_TABLE_NAME} (\n id SERIAL PRIMARY KEY,\n name TEXT,\n source VARCHAR(32),\n source_id VARCHAR(32),\n source_id_attr VARCHAR(32),\n type VARCHAR(128),\n is_natural INTEGER DEFAULT 1,\n permanence VARCHAR(64) DEFAULT 'permanent',\n surface VARCHAR(64) DEFAULT 'surface',\n geom geometry(MultiLineString, {SRID})\n )\n \"\"\",\n dbname=DBNAME\n )\n for source in sources:\n source_table_name = f\"{source}_waterways\"\n sql = f\"\"\"\n INSERT INTO {WATERWAYS_TABLE_NAME} (\n name,\n source,\n source_id_attr,\n source_id,\n type,\n is_natural,\n permanence,\n surface,\n geom\n )\n SELECT\n max(name),\n '{source}',\n max(source_id_attr),\n source_id,\n max(type),\n max(is_natural),\n max(permanence),\n max(surface),\n ST_Collect(ST_SimplifyPreserveTopology(geom, 0.00001)) AS geom\n FROM {source_table_name}\n GROUP BY source_id\n \"\"\"\n try:\n util.run_sql(sql)\n except psycopg2.errors.UndefinedTable:\n util.log(f\"{source_table_name} doesn't exist, skipping...\")",
"def load_watersheds(sources):\n util.run_sql(\n f\"DROP TABLE IF EXISTS \\\"{WATERSHEDS_TABLE_NAME}\\\"\",\n dbname=DBNAME\n )\n util.run_sql(\n f\"\"\"\n CREATE TABLE {WATERSHEDS_TABLE_NAME} (\n id SERIAL PRIMARY KEY,\n name TEXT,\n source VARCHAR(32),\n source_id VARCHAR(32),\n source_id_attr VARCHAR(32),\n geom geometry(MULTIPOLYGON, {SRID})\n )\n \"\"\",\n dbname=DBNAME\n )\n util.run_sql(\n f\"DROP TABLE IF EXISTS \\\"{WATERSHEDS_MASK_TABLE_NAME}\\\"\",\n dbname=DBNAME\n )\n util.run_sql(\n f\"\"\"\n CREATE TABLE {WATERSHEDS_MASK_TABLE_NAME} (\n geom geometry(MULTIPOLYGON, {SRID})\n )\n \"\"\",\n dbname=DBNAME\n )\n for source in sources:\n source_table_name = f\"{source}_watersheds\"\n num_mask_rows = util.run_sql(\n f\"SELECT COUNT(*) FROM {WATERSHEDS_MASK_TABLE_NAME}\"\n )[0][0]\n if num_mask_rows == 0:\n try:\n # Insert the first watersheds\n util.run_sql(f\"\"\"\n INSERT INTO {WATERSHEDS_TABLE_NAME} (\n name,\n source,\n source_id_attr,\n source_id,\n geom\n )\n SELECT\n name,\n '{source}',\n source_id_attr,\n source_id,\n geom\n FROM {source_table_name}\n \"\"\")\n # Build the mask\n util.initialize_masks_table(\n WATERSHEDS_MASK_TABLE_NAME, source_table_name, buff=0.0001)\n except psycopg2.errors.UndefinedTable:\n util.log(f\"{source_table_name} doesn't exist, skipping...\")\n continue\n else:\n try:\n source_dump_table_name = f\"{source_table_name}_dump\"\n util.run_sql(f\"DROP TABLE IF EXISTS {source_dump_table_name}\")\n util.run_sql(f\"\"\"\n CREATE TABLE {source_dump_table_name} AS\n SELECT\n name,\n source_id_attr,\n source_id,\n (ST_Dump(\n ST_Difference(\n geom,\n (SELECT geom FROM {WATERSHEDS_MASK_TABLE_NAME})\n )\n )).geom AS geom\n FROM\n {source_table_name}\n \"\"\")\n # Remove the polygons that are entirely within a small buffer of\n # the existing mask, i.e. the slivers that might have resulted\n # from diffing a complex coastline\n util.run_sql(f\"\"\"\n DELETE FROM {source_dump_table_name}\n WHERE ST_Contains(\n (\n SELECT st_buffer(geom, 0.01)\n FROM {WATERSHEDS_MASK_TABLE_NAME}\n ),\n geom\n )\n \"\"\")\n # Insert the massaged polygons as multipolygons\n util.run_sql(f\"\"\"\n INSERT INTO {WATERSHEDS_TABLE_NAME} (\n name,\n source,\n source_id_attr,\n source_id,\n geom\n )\n SELECT\n name,\n '{source}',\n source_id_attr,\n source_id,\n ST_Collect(geom)\n FROM {source_dump_table_name}\n GROUP BY name, source_id_attr, source_id\n \"\"\")\n util.run_sql(f\"DROP TABLE {source_table_name}_dump\")\n # Update the mask\n util.update_masks_table(\n WATERSHEDS_MASK_TABLE_NAME, source_table_name, buff=0.0001)\n except psycopg2.errors.UndefinedTable:\n util.log(f\"{source_table_name} doesn't exist, skipping...\")\n continue",
"def to_dataset(self, num_stations = 1000, Random = False):\n #generate date_range and initialize dataframe\n taxis = pd.date_range('1850-01', '2019-01', freq='M')\n df = pd.DataFrame({'time': taxis})\n \n #setting date_range as the index of the dataset\n df = df.set_index(['time'])\n \n #defining dataset with metadata:\n lat = []\n lon = []\n count = []\n ids = []\n \n #counter for visualization of progress\n counter = 0\n \n #extracting random stations if random = True is passed when calling method\n if Random:\n file_list = random.sample(self.file_list, num_stations)\n \n #redifine the file_list\n self.file_list = file_list\n \n else:\n file_list = self.file_list\n \n for file in tqdm(file_list):\n \n #from class Observation retrieve years and observations\n station = Observation(file)\n \n try:\n #merging with the final dataframe on column time since it is common\n df = pd.merge(df, station.dataset, on = 'time', how='left')\n #filling metadata lat, lon, country and id of station\n lat.append(station.lat_line)\n lon.append(station.lon_line)\n count.append(station.country)\n ids.append(station.id)\n \n except:\n print('Cannot merge station {}/{}, observation = {}'. format(station.id, station.country, len(station.dataset)))\n self.file_list.remove(file)\n continue\n \n #inserting df in the model variables\n self.df = df\n #print(len(ids), len(lon), len(lat), len(count), len(self.file_list))\n #inserting metadata in a datagrame and setting ids as index\n metadata = pd.DataFrame({'ids': ids, 'lon': lon, 'lat': lat, 'country': count})\n metadata = metadata.set_index('ids')\n \n #inserting metadata in the model variables\n self.metadata = metadata\n self.ids_list = ids\n \n #define recover attributes\n \n self.recover = self.df\n self.meta_recover = self.metadata",
"def load_waterbodies(sources):\n util.run_sql(\n f\"DROP TABLE IF EXISTS {WATERBODIES_TABLE_NAME}\", dbname=DBNAME)\n util.run_sql(\n f\"\"\"\n CREATE TABLE {WATERBODIES_TABLE_NAME} (\n id SERIAL PRIMARY KEY,\n name TEXT,\n source VARCHAR(32),\n source_id VARCHAR(32),\n source_id_attr VARCHAR(32),\n type VARCHAR(128),\n is_natural INTEGER DEFAULT 1,\n permanence VARCHAR(64) DEFAULT 'permanent',\n geom geometry(MultiPolygon, {SRID})\n )\n \"\"\",\n dbname=DBNAME\n )\n for source in sources:\n source_table_name = f\"{source}_waterbodies\"\n util.run_sql(f\"\"\"\n INSERT INTO {WATERBODIES_TABLE_NAME} (\n name,\n source,\n source_id_attr,\n source_id,\n type,\n is_natural,\n permanence,\n geom\n )\n SELECT\n name,\n '{source}',\n source_id_attr,\n source_id,\n type,\n is_natural::int,\n permanence,\n geom\n FROM {source_table_name}\n \"\"\")",
"def extend(self, timestamp, onerror=\"raise\"):\n\n with log.LogToTimeFile(timestamp):\n try:\n # first get the ground stations: these determine which points I\n # want to # extract\n logger.info(f\"Loading data for {timestamp:%Y-%m-%d %H:%M:%S}\")\n synop = self.ground.load(timestamp)\n\n # for each non-unique lat/lon, choose the time closest to\n # /timestamp/\n synop = self._select_closest_latlon(synop, timestamp)\n\n lats = synop.index.get_level_values(\"LATITUDE\")\n lons = synop.index.get_level_values(\"LONGITUDE\")\n\n # FIXME: use concurrent.futures here\n # extract will also call .load thus taking care of dependencies\n satdata = self.sat.extract(timestamp, lats, lons)\n nwpdata = self.nwp.extract(timestamp, lats, lons)\n # FIXME: with concurrent.futures, wait for sat and nwp to be\n # finished\n cmicdata = self.cmic.extract(timestamp, lats, lons)\n demdata = self.dem.extract(timestamp, lats, lons)\n # FIXME: with concurrent.futures, wait for cmic and dem to be\n # finished\n fogdata = self.fog.extract(timestamp, lats, lons)\n logger.info(\"Collected all fogdb components, \"\n \"putting it all together\")\n df = _concat_mi_df_with_date(\n satdata,\n synop=synop,\n nwp=nwpdata,\n cmic=cmicdata,\n dem=demdata,\n fog=fogdata)\n if self.data is None:\n self.data = df\n else:\n self.data = pandas.concat([self.data, df], axis=0)\n except (FogDBError, OSError, EOFError):\n if onerror == \"raise\":\n raise\n elif onerror == \"log\":\n logger.exception(\"Failed to extend database with data \"\n f\"from {timestamp:%Y-%m-%d %H:%M:%S}:\")\n else:\n raise ValueError(\"Unknown error handling option: \"\n f\"{onerror!s}\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
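A hedged sketch of how `populate_water_measurements` from the row above might be wired up. The SQLite connection, archive name, directory and station id are illustrative assumptions; the real values come from the project's metadata and from helpers such as `get_data_path` and `get_water_index_map`, which are not shown in this row:

```python
import sqlite3

# Illustrative setup only: the project may use a different driver and schema.
connection = sqlite3.connect('water.db')
cursor = connection.cursor()

# Hypothetical archive/directory/station values.
if populate_water_measurements(cursor, archive='surface', directory='daily', station=1060):
    connection.commit()
else:
    print('No usable measurement columns for this station; nothing inserted.')
```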
Populate watercourse- and aquifer-related data tables. | def populate_water_tables(connection):
    metadata = load_metadata('water')
    cursor = connection.cursor()
    # Check if tables are already populated.
    cursor.execute('SELECT count(*) FROM watercourses')
    watercourse_count = cursor.fetchone()[0]
    cursor.execute('SELECT count(*) FROM aquifers')
    aquifer_count = cursor.fetchone()[0]
    if watercourse_count and aquifer_count:
        print('Water tables already populated!')
        return
    station_data = get_station_data()
    for archive in metadata.keys():
        print(f'{archive}-water:'.upper())
        water_body = get_water_definitions(archive)['body']
        # 1. Populate watercourses/aquifers:
        stations = {}
        for water_body_name in metadata[archive].keys():
            print(f'\tPopulating {water_body}: "{water_body_name}"')
            cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)
                               VALUES (0, '{water_body_name}')''')
            water_body_id = cursor.lastrowid
            # 2. Populate watercourse_stations/aquifer_stations:
            for station_id in metadata[archive][water_body_name]['stations']:
                station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])
                if station_id in stations:
                    # Prefer the watercourse/aquifer with more stations.
                    current_len = len(metadata[archive][water_body_name]['stations'])
                    previous_len = len(metadata[archive][stations[station_id]]['stations'])
                    if current_len < previous_len:
                        print(f'\t\tStation already exists: {station_id} - "{station_name}" ("{water_body_name}")')
                        continue
                    else:
                        cursor.execute(f'''DELETE
                                           FROM {water_body}_stations
                                           WHERE id = {station_id}''')
                        print(f'\t\tRemoved station: {station_id} - "{station_name}" from "{stations[station_id]}"')
                stations[station_id] = water_body_name
                print(f'\t\tPopulating station: {station_id} - "{station_name}"')
                # Insert station location if station data exists.
                location_id = 0
                station_row = station_data.query(f'ŠIFRA == "{station_id}"')
                if not station_row.empty:
                    index = station_row.index[0]
                    lat = station_row.at[index, 'LAT']
                    lng = station_row.at[index, 'LON']
                    if not np.isnan(lat) and not np.isnan(lng):
                        name = f"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})"
                        cursor.execute(f'''INSERT INTO locations(name, lat, lng)
                                           VALUES ('{name}', {lat}, {lng})''')
                        location_id = cursor.lastrowid
                # Insert station.
                cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)
                                   VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')
                # 3. Populate watercourse_measurements/aquifer_measurements:
                if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],
                                                   station_id):
                    cursor.execute(f'''DELETE
                                       FROM {water_body}_stations
                                       WHERE id = {station_id}''')
                    print(f'\t\tRemoved station with useless data: {station_id} - "{station_name}"')
        # Remove empty watercourses/aquifers.
        cursor.execute(f'''SELECT w.id, w.name
                           FROM {water_body}s w
                           WHERE NOT EXISTS (
                               SELECT s.id
                               FROM {water_body}_stations s
                               WHERE w.id = s.{water_body}_id
                           )''')
        for row in cursor.fetchall():
            cursor.execute(f'''DELETE
                               FROM {water_body}s
                               WHERE id = {row[0]}''')
            print(f'\tRemoved empty {water_body}: "{row[1]}"') | [
"def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()",
"def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()",
"def load_dwh_tables(self):\n print(\"Loading the creative works table\")\n self.cur.execute(dwh_queries.INSERT_CREATIVE_WORKS_SQL_QUERY)\n self.conn.commit()\n\n print(\"Loading the participations table\")\n\n self.cur.execute(dwh_queries.INSERT_PARTICIPATIONS_SQL_QUERY)\n self.conn.commit()",
"def create_data(self):\n \n if self.table not in metadata.tables.keys():\n return print(f\"{self.table} does not exist\")\n\n if self.table == \"customers\":\n with engine.begin() as conn:\n for _ in range(self.num_records):\n insert_stmt = customers.insert().values(\n first_name = faker.first_name(),\n last_name = faker.last_name(),\n email = faker.email(),\n address = faker.address(),\n dob = faker.date_of_birth(minimum_age=16, maximum_age=60)\n )\n conn.execute(insert_stmt)\n\n if self.table == \"products\":\n with engine.begin() as conn:\n for _ in range(self.num_records):\n insert_stmt = products.insert().values(\n name = random.choice(product_list),\n price = faker.random_int(1,100000) / 100.0\n )\n conn.execute(insert_stmt)\n\n if self.table == \"stores\":\n with engine.begin() as conn:\n for _ in range(self.num_records):\n insert_stmt = stores.insert().values(\n address = faker.address()\n )\n conn.execute(insert_stmt)\n\n if self.table == \"transactions\":\n with engine.begin() as conn:\n for _ in range(self.num_records):\n date_obj = datetime.datetime.now() - datetime.timedelta(days=random.randint(0,30))\n\n insert_stmt = transactions.insert().values(\n transaction_date=date_obj.strftime(\"%Y/%m/%d\"),\n customer_id=random.choice(conn.execute(select([customers.c.customer_id])).fetchall())[0],\n product_id=random.choice(conn.execute(select([products.c.product_id])).fetchall())[0],\n store_id=random.choice(conn.execute(select([stores.c.store_id])).fetchall())[0]\n )\n conn.execute(insert_stmt)",
"def setUp(self):\n resume.objects.create(\n first_name='Nicholas',\n last_name='Bielinski',\n )\n experience.objects.create(\n title='Helpdesk Technician',\n location='L3 Technologies',\n start_date='6/26/2017',\n end_date='present',\n description='blah blah blah'\n )\n education.objects.create(\n institution_name='UNH Manchester',\n location='Manchester',\n degree='Bachelor',\n major='CIS',\n gpa = '3.5'\n )",
"def synthesize_employment_data(self, config):\r\n jobs_by_zone_by_sector_table_name = config['jobs_by_zone_by_sector']\r\n gridcells_table_name = config['gridcells']\r\n jobs_table_name = config['jobs']\r\n gridcells_output_table_name = config['gridcells_output']\r\n jobs_output_table_name = config['jobs_output']\r\n \r\n input_db_name = config['db_config'].database_name\r\n output_db_name = config['output_database_name']\r\n \r\n sectors = config['sector_names_and_ids']\r\n building_types_and_ids_and_home_based = config[\r\n 'building_type_column_names_and_ids_and_home_based']\r\n \r\n building_types = []\r\n building_ids = []\r\n home_based = [] \r\n for type, id, home in building_types_and_ids_and_home_based:\r\n building_types += [type]\r\n building_ids += [id]\r\n home_based += [home]\r\n \r\n \r\n from_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = input_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n to_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = output_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n\r\n FlattenScenarioDatabaseChain().copy_scenario_database(\r\n from_database_configuration = from_database_configuration, \r\n to_database_configuration = to_database_configuration,\r\n tables_to_copy = [gridcells_table_name, jobs_table_name])\r\n \r\n db_server = DatabaseServer(to_database_configuration) \r\n output_database = db_server.get_database(output_db_name)\r\n \r\n sector_name = 0; sector_id = 1\r\n \r\n sector = {}\r\n for entry in sectors:\r\n name = entry[sector_name]\r\n id = entry[sector_id]\r\n sector[id] = self._get_jobs_per_building_type_in_sector_by_zone(\r\n output_database, jobs_by_zone_by_sector_table_name, \r\n jobs_table_name, name, id)\r\n\r\n results = self._get_building_type_proportion_by_zone(output_database, \r\n gridcells_table_name)\r\n \r\n grid_id = 0; zone_id = 1\r\n dist = {}\r\n \r\n type_index = {}\r\n \r\n for name in building_types:\r\n for i in range(len(results[0])):\r\n column_name = results[0][i]\r\n if name == column_name:\r\n type_index[name] = i\r\n break;\r\n else:\r\n raise KeyError, ('No column by the name of \\'%s\\' found in '\r\n 'the database.' 
% name) \r\n\r\n for name in building_types:\r\n dist[name] = {}\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] = []\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] += [(row[grid_id], \r\n row[type_index[name]])]\r\n \r\n jobs_table_data = self._create_jobs_table_data(dist, sector,\r\n building_types_and_ids_and_home_based)\r\n \r\n output_database.execute('USE %(out_db)s' % {'out_db':output_db_name})\r\n \r\n output_database.execute(\"\"\"\r\n CREATE TABLE %(jobs_out)s (\r\n JOB_ID INT AUTO_INCREMENT, PRIMARY KEY(JOB_ID),\r\n GRID_ID INT, HOME_BASED INT, SECTOR_ID INT, BUILDING_TYPE INT);\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n \r\n if len(jobs_table_data) > 0:\r\n output_prefix = (\r\n \"\"\"INSERT INTO %(jobs_out)s \r\n (GRID_ID, HOME_BASED, SECTOR_ID, BUILDING_TYPE) VALUES\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n output_postfix = ';'\r\n \r\n step = 1000\r\n length = len(jobs_table_data)\r\n iterations = int(length/step) + 1\r\n \r\n for i in range(iterations):\r\n low = i*step\r\n high = (i+1)*step\r\n \r\n if high > length: high = length\r\n \r\n output_body = \"\"\r\n \r\n for j in range(low, high):\r\n output_body += (\r\n '(%(grid)s, %(home)s, %(sector)s, %(building)s),\\n' \r\n % jobs_table_data[j])\r\n \r\n output_query = \"%s%s%s\" % (output_prefix, \r\n output_body[:-2], \r\n output_postfix)\r\n\r\n output_database.execute(output_query)\r\n \r\n \r\n ### TODO: \r",
"def setupDatabase():\n\tconn=sqlite3.connect(allcoursesdatabase)\n\tcurs=conn.cursor()\n\tcurs.execute('DROP TABLE courseinfo')\n\tcurs.execute('CREATE TABLE courseinfo (id INTEGER PRIMARY KEY NOT NULL,title NOT NULL,code NOT NULL,instructor NOT NULL,unitsmin INTEGER NOT NULL,unitsmax INTEGER NOT NULL,description)')\n\tconn.commit()\n\tconn.close()",
"def init_tables():\n # drop_table_m_candidates()\n # drop_table_m_qiita_users()\n create_table_m_candidates()\n create_table_m_qiita_users()",
"def populate_database(num_patients, min_checkins, max_checkins):\n departments = [\n Department(department_name=\"Cardiology\"),\n Department(department_name=\"Emergency\"),\n Department(department_name=\"Gynecology\"),\n Department(department_name=\"Pediatrics\"),\n Department(department_name=\"Obstetrics\"),\n Department(department_name=\"Oncology\"),\n Department(department_name=\"Orthopedics\"),\n Department(department_name=\"Neurology\")\n ]\n\n for i in xrange(num_patients):\n patient = Patient(**generate_patient())\n patient.departments.append(choice(departments))\n db.add(patient)\n\n for j in xrange(randrange(min_checkins, max_checkins)):\n checkin = CheckIn(**generate_checkin())\n checkin.patient_nhi = patient.nhi\n\n lci = patient.latest_checkin_time\n vid = checkin.checkin_time\n\n lci = vid if lci is None or vid > lci else lci\n patient.latest_checkin_time = lci\n\n db.add(checkin)\n\n for k in xrange(randrange(0, 3)):\n appointment = Appointment(**generate_appointment())\n appointment.patient_nhi = patient.nhi\n\n db.add(appointment)\n\n db.commit()",
"def init_database(self):\n from trainer.models import Subject, Owner\n\n default_owner_id = \"000000000000000000000\"\n default_owner_name = \"Legacy Owner\"\n owner_record, created = Owner.objects.get_or_create(id = default_owner_id, name = default_owner_name)\n if created:\n logger.info(\"Added Owner: %s\" %owner_record)\n owner_record.save()\n else:\n logger.info(\"Existing Owner: %s\" %owner_record)\n\n data_folder_path = self.working_dir+\"/training-data\"\n #get the directories (one directory for each subject) in data folder\n dirs = os.listdir(data_folder_path)\n for dir_name in dirs:\n\n #ignore system files like .DS_Store\n if dir_name.startswith(\".\"):\n continue;\n\n subject = dir_name\n subject_record, created = Subject.objects.get_or_create(name = subject, owner = owner_record, in_training = True)\n if created:\n logger.info(\"Added Subject: %s\" %subject_record)\n subject_record.save()\n else:\n logger.info(\"Existing Subject: %s\" %subject_record)",
"def run(self):\n\n for table in self.TABLES:\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}_Work\")\n self.dictionary_conn.commit()\n self.logger.info(\"work tables cleared\")\n for id in self.ids:\n drug = self.Drug(self, id)\n if drug.wanted:\n drug.load()\n self.logger.info(\"work tables populated\")\n for table in self.TABLES:\n insert = f\"INSERT INTO {table} SELECT * FROM {table}_Work\"\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}\")\n self.dictionary_cursor.execute(insert)\n self.dictionary_conn.commit()\n self.logger.info(\"live tables ready\")",
"def populate_db():\n from dataservice.util.data_gen.data_generator import DataGenerator\n dg = DataGenerator()\n dg.create_and_publish_all()",
"def populate_db():\n # create connexion to application's database\n db = manageDB.UseDB()\n\n # create query handler\n action = manageDB.Command()\n\n # Requesting OFF API for different food categories\n data_set = requestAPI.FoodAPI()\n\n categories = ['fruits', 'legumes-et-derives', 'produits-laitiers',\n 'viandes', 'poissons', 'boissons',\n 'aliments-et-boissons-a-base-de-vegetaux', 'petit-dejeuners',\n 'snacks']\n\n for category in categories:\n data_set.call_for(category, qt='100')\n\n # prepare data before populating the database\n parser = prepareData.Parser(data_set)\n cleaned_data = parser.prepare_data()\n\n # add category in database 'categories' table and get it's id\n action.add_categories(db, category)\n category_id = action.find_category_id(db, category)\n\n # add category id number to each product of the cleaned data set \\\n # before insertion in app's database\n for product in cleaned_data:\n product.update(category_id=category_id)\n action.add_products(db, product)",
"def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n 
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n \"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')",
"def fill_conference(data): # filling the conferences table\r\n conferences = [c for c in data if c.get('edition') == 'Conference']\r\n for c in conferences:\r\n curs.execute(r'select aut_id from authors where aut_name=?;', [c.get('Author')])\r\n aut_id = curs.fetchone()[0]\r\n curs.execute(r\"insert into conferences(title, aut_id, booktitle, year, language, pages, tag)\"\r\n r\"values(?,?,?,?,?,?,?);\", (c.get('Title', 'Null'), int(aut_id), str(c.get('Booktitle', \"Null\")),\r\n str(c.get('Year', \"Null\")), c.get('Language', \"Null\"),\r\n c.get('Pages', 'Null'), c.get(\"tag\", \"Null\")\r\n )\r\n )\r\n conn.commit()",
"def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()",
"def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query 
= \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': 
{'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger 
####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)",
"def prepare_database(self, waterscenario=None, trafficscenario=None):\n\n # Validate input\n if waterscenario:\n waterscenario = Path(waterscenario)\n assert waterscenario.exists(), 'Waterscenario file not found'\n\n BIVAS = pyBIVAS(self.BIVAS_database)\n df_trafficscenarios = BIVAS.trafficscenario_numberoftrips()\n\n\n # Do changes to database:\n con = sqlite3.connect(self.BIVAS_database)\n c = con.cursor()\n\n # Update waterscenario with given file\n if waterscenario:\n # Delete current water_scenario_values\n sql = \"DELETE FROM water_scenario_values WHERE 1\"\n c.execute(sql)\n\n sql = \"DELETE FROM water_scenarios WHERE 1\"\n c.execute(sql)\n\n # Write waterdata to database\n\n # Read waterscenario file\n df = pd.read_csv(waterscenario, header=0, index_col=None)\n df = df[['ArcID', 'SeasonID', 'WaterLevel__m', 'RateOfFlow__m3_s', 'WaterSpeed__m_s', 'WaterDepth__m']]\n df['WaterScenarioID'] = 1\n\n # Add new water_scenario\n df.to_sql('water_scenario_values', con,\n if_exists='append', index=False)\n\n # Rename water_scenario\n # waterscenario_name = waterscenario.stem\n # sql = \"\"\"UPDATE water_scenarios SET Description = \"{}\" WHERE ID = {}\"\"\".format(\n # waterscenario_name, waterscenario)\n # c.execute(sql)\n\n\n waterscenario_id = 1\n waterscenario_name = 'TEST waterscenario'\n waterscenario_type = 1\n sql = \"\"\"INSERT into water_scenarios VALUES ({}, '{}', {})\"\"\".format(\n waterscenario_id,\n waterscenario_name,\n waterscenario_type\n )\n c.execute(sql)\n\n # Remove water scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n sql = \"\"\"UPDATE parameters SET WaterScenarioID = 1 WHERE 1\"\"\"\n c.execute(sql)\n\n else:\n # Remove water scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n sql = \"\"\"UPDATE parameters SET WaterScenarioID = NULL WHERE 1\"\"\"\n c.execute(sql)\n\n # Set scenario name and description\n date_string = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.description = f'Date: {date_string}, Waterscenario: {waterscenario}, TrafficScenario: {trafficscenario},'\n\n sql = \"\"\"\n UPDATE scenarios\n SET Name = \"{}\",\n Description = \"{}\"\n WHERE ID = {}\n \"\"\".format(\n self.scenarioName, self.description, self.scenarioID)\n c.execute(sql)\n\n # Update traffic Scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n if trafficscenario:\n if isinstance(trafficscenario, int):\n sql = \"\"\"UPDATE parameters SET TrafficScenarioID = \"{}\" WHERE 1\"\"\".format(trafficscenario)\n c.execute(sql)\n else:\n trafficScenarioID = df_trafficscenarios.index[df_trafficscenarios['Description'] == trafficscenario][0]\n sql = \"\"\"UPDATE parameters SET TrafficScenarioID = \"{}\" WHERE 1\"\"\".format(trafficScenarioID)\n c.execute(sql)\n\n con.commit()\n con.close()\n\n logger.info('BIVAS database copied and updated')",
"def orca_year_dataset(hdf, tbls_to_load, year, is_base):\n\n orca.clear_cache()\n orca.add_injectable(\"year\", year)\n hdf_year = \"base\" if is_base or (year == 2019) else year\n\n for tbl in tbls_to_load:\n if (year == 2019) and (tbl == \"jobs\"):\n name = f\"{hdf_year}/{tbl}_2019\"\n else:\n name = f\"{hdf_year}/{tbl}\"\n if name in hdf:\n df = hdf[name]\n else:\n sub_name = [n for n in hdf.keys() if tbl in n][0]\n print(f\"No table named {name}. Using the structure from {sub_name}.\")\n df = hdf[sub_name].iloc[0:0]\n\n if tbl in {\"households\", \"jobs\"} and \"large_area_id\" not in df.columns:\n print(\"impute large_area_id\")\n df[\"large_area_id\"] = misc.reindex(\n orca.get_table(\"buildings\").large_area_id, df.building_id\n )\n orca.add_table(tbl, df.fillna(0))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Populate locations data table. | def populate_locations(connection):
print('Populating locations...')
cursor = connection.cursor()
with open(get_data_path('locations', 'locations.json'), 'r', encoding='utf-8') as json_file:
locations = json.load(json_file)
for station_id, location in locations.items():
cursor.execute(f'''SELECT id
FROM watercourse_stations
WHERE id = {station_id}''')
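        # Insert a location only for stations that actually exist in watercourse_stations.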
if len(cursor.fetchall()):
cursor.execute(f'''INSERT INTO locations(name, lat, lng)
VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')
cursor.execute(f'''UPDATE watercourse_stations
SET location_id = {cursor.lastrowid}
WHERE id = {station_id}''') | [
"def __createData(self):\n self.locations = []\n\n # open whitespace-delimited file from url and read lines from it:\n data = pd.read_csv('cities.csv')\n path_first = pd.read_csv('sample_submission.csv')\n first_path = [id for id in path_first[\"Path\"]]\n index_cities = [city_id for city_id in data[\"CityId\"]]\n coord_x = [x for x in data[\"X\"]]\n coord_y = [y for y in data[\"Y\"]]\n\n self.tspSize = len(index_cities)\n for i in range(len(index_cities)):\n self.locations.append([coord_x[i], coord_y[i]])\n\n # initialize distance matrix by filling it with 0's:\n self.distances = [[0] * self.tspSize for _ in range(self.tspSize)]\n\n # populate the distance matrix with calculated distances:\n for i in range(self.tspSize):\n for j in range(i + 1, self.tspSize):\n summ = 0.0\n for coord1, coord2 in zip(self.locations[i], self.locations[j]):\n summ += pow((coord1 - coord2), 2)\n self.distances[i][j] = math.sqrt(summ)\n self.distances[j][i] = math.sqrt(summ)",
"def generate_position_data(self):\n # populate 'Location' field randomly\n self.output['Location'] = np.random.choice(self.locations, self.obs)\n\n # clean up geodata data frame and create 'Position' attribute\n nc = self.geodata[['Lat', 'Lng', 'Elevation']].round(2)\n nc['Elevation'] = nc['Elevation'].astype(int)\n self.geodata['Position'] = nc.astype(\n str).apply(lambda x: ','.join(x), axis=1)\n self.geodata.drop(columns=['Lat', 'Lng', 'Elevation'], inplace=True)\n\n # update 'Position' column in output data frame\n left = self.output.set_index('Location') # set left index\n right = self.geodata.set_index('Location') # set right index\n self.output = left.loc[:, left.columns.union(right.columns)] # union\n self.output.update(right) # update self.output \"Position\" column\n self.output.reset_index(inplace=True)",
"def _set_station_locations(self, station_locations):\n\n if self.data_array is None:\n self.get_mt_dict()\n self.get_period_list()\n self._fill_data_array()\n \n for s_arr in station_locations:\n try:\n d_index = np.where(self.data_array['station'] == \n s_arr['station'])[0][0]\n except IndexError:\n print('Could not find {0} in data_array'.format(s_arr['station']))\n d_index = None\n \n if d_index is not None:\n self.data_array[d_index]['lat'] = s_arr['lat']\n self.data_array[d_index]['lon'] = s_arr['lon']\n self.data_array[d_index]['east'] = s_arr['east']\n self.data_array[d_index]['north'] = s_arr['north']\n self.data_array[d_index]['elev'] = s_arr['elev']\n self.data_array[d_index]['rel_east'] = s_arr['rel_east']\n self.data_array[d_index]['rel_north'] = s_arr['rel_north']",
"def locations(self, locations):\n \n self._locations = locations",
"def add_locations(self):\n for _ in range(0, self.num_locations):\n detector_id = self.generate_id()\n detector_direction = self.generate_direction()\n detector_point = self.generate_point()\n self.dataset[detector_id] = (detector_direction, detector_point)\n assert len(self.dataset) == self.num_locations",
"def _genChildByLocationLookupTable(self):\n runLog.extra(\"Generating location-to-child lookup table.\")\n self.childrenByLocator = {}\n for child in self:\n self.childrenByLocator[child.spatialLocator] = child",
"def get_all_locations(self):",
"def load_data():\n if _LOCATIONS_BY_ID:\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID\n\n # We need to read the locations in order of country -> admin level 1 -> admin level 2 -> city.\n # This is so that the higher resolution locations can look up the lower resolution locations\n # that they belong to, and compute the necessary fields.\n countries_by_code = _load_country_data(_DATA_FILES['country'])\n admin1_by_code = _load_admin1_data(_DATA_FILES['admin_1'], countries_by_code)\n admin2_by_code = _load_admin2_data(_DATA_FILES['admin_2'], countries_by_code, admin1_by_code)\n _load_city_data(_DATA_FILES['city'], countries_by_code, admin1_by_code, admin2_by_code)\n _add_alternate_names(_DATA_FILES['alt_wiki_names'])\n _add_estimated_importances(_DATA_FILES['estimated_importance'])\n\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID",
"def generate_test_locations(self):\n def generate_locations_for_organization(\n location_names, organization_name):\n item_dict = {}\n for name in location_names:\n item_dict['{}_{}'.format(name, organization_name)] = {\n 'name': name,\n 'organization': organization_name\n }\n return item_dict\n\n self.ls_o1_dict = \\\n generate_locations_for_organization(\n ['l1', 'l2', 'l3', 'l4', 'l5'], 'o1')\n\n self.ls_sub1_o1_dict = \\\n generate_locations_for_organization(\n ['l1', 'l2', 'l3', 'l4'], 'sub1_o1')\n\n self.ls_o2_dict = \\\n generate_locations_for_organization(['l1', 'l2', 'l3', 'l4'], 'o2')\n\n self.ls_sub1_o2_dict = \\\n generate_locations_for_organization(['l1', 'l2'], 'sub1_o2')\n\n # generate locations of org_3\n self.ls_o3_dict = \\\n generate_locations_for_organization(['l1', 'l2'], 'o3')\n\n # generate locations dictionary\n self.ls_dict = {\n **self.ls_o1_dict,\n **self.ls_sub1_o1_dict,\n **self.ls_o2_dict,\n **self.ls_sub1_o2_dict,\n **self.ls_o3_dict,\n }\n\n # generate locations in database\n self.locations = self.create_locations_from_data(\n self.ls_dict, self.orgs)",
"def testLocationsSitesData(self):\n self.src.logger.debug(\"Verification of Locations and sites data.\")\n\n locationsSites = [x['name'] for x in self.src.getLocationSitesData(\"Milano\", refresh = True)]\n\n self.assertIn(\"film.it\", locationsSites, \"film.it should be included in the places list.\")\n self.assertIn(\"mymovies.it\", locationsSites, \"mymovies.it should be included in the places list.\")\n\n self.src.logger.debug(\"Verification for Locations seed data passed.\")",
"def set_data_dict(self, location_list=None):\n if location_list is None:\n raise RuntimeError(\"No input data file specified.\")\n\n # First loop add all labels for original data\n # Second loop add all new labels in draw_label_list\n for key in self.global_label_list:\n self.data_dict[key] = []\n for key in self.draw_label_list:\n if key in self.global_label_list:\n continue\n else:\n self.data_dict[key] = []\n\n for location in location_list:\n with open(location) as f:\n last_hour = \"-10\"\n for line in f:\n line = line.strip()\n line_list = line.split(\"\\t\")\n insert_pointer = 2\n cur_hour = line_list[1].split(\"/\")[-1].split(\".\")[-3][0:2]\n for key in self.global_label_list:\n if cur_hour == last_hour:\n self.data_dict[key][-1] += int(line_list[insert_pointer])\n else:\n self.data_dict[key].append(int(line_list[insert_pointer]))\n insert_pointer += 1\n last_hour = cur_hour\n print(self.data_dict)",
"def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)",
"def create_locations(self, data):\n total_objects = len(data)\n parsed_objects = 0\n\n for object in data:\n # Get location title. 'name' val is available to all objects, but Building 'title'\n # and RegionalCampus 'description' are more descriptive. Use them if available.\n if hasattr(object, 'title'):\n title = object['title']\n elif hasattr(object, 'description'):\n title = object['description']\n else:\n title = object['name']\n\n # Get other data.\n mapurl = object['profile_link']\n import_id = object['id']\n\n if title:\n # Check to see if the location name, map url are too long\n if len(title) > 256:\n title = title[0:256]\n if len(mapurl) > 400:\n mapurl = mapurl[0:400]\n if len(import_id) > 256:\n import_id = import_id[0:256]\n\n # See if an existing location exists with the current object ID.\n # Update the existing location if it exists; else, save the new location\n try:\n old_location = Location.objects.get(import_id=import_id)\n except Exception as e:\n logging.debug('No existing location found for %s: %s. Creating new location...' % (title, e))\n # No existing matches found, or the matches were duplicate\n new_location = Location(title=title, url=mapurl, room='', import_id=import_id, reviewed=True)\n try:\n new_location.save()\n except Exception as e:\n logging.error('Unable to save new location %s: %s' % (title, str(e)))\n else:\n parsed_objects += 1\n logging.info('New location %s created.' % title)\n else:\n logging.debug('Existing location %s found with Import ID %s. Updating existing location...' % (title, import_id))\n old_location.title = title\n old_location.url = mapurl\n old_location.room = ''\n old_location.reviewed = True\n try:\n old_location.save()\n except Exception as e:\n logging.error('Unable to save existing location %s: %s' % (title, str(e)))\n else:\n parsed_objects += 1\n logging.info('Existing location %s with Import ID %s updated.' % (title, import_id))\n\n logging.info('Done. %s of %s available objects successfully imported.' % (parsed_objects, total_objects))",
"def populate_task_location():\n from maproulette import db\n from maproulette.models import Task, Challenge\n for challenge in db.session.query(Challenge):\n counter = 0\n for task in db.session.query(Task).filter_by(\n challenge_slug=challenge.slug):\n task.set_location()\n counter += 1\n # commit every 1000\n if not counter % 1000:\n db.session.commit()\n db.session.commit()\n print('done. Location for %i tasks in challenge %s set' %\\\n (counter, challenge.title))",
"def __init__(self, num_locations):\n self.dataset = {}\n self.num_locations = num_locations\n self.add_locations()",
"def get_locations_from_db(self):\n return self.conn.execute(\"\"\"SELECT country, location, metaweather_id from(\n SELECT country, locations.location, metaweather_id, created_at from locations\n {})\"\"\".format(self.last_date_inner_join('locations')))",
"def populate_ni_loc(location):\r\n ni_count = NewsItem.objects.count()\r\n cursor = connection.cursor()\r\n # In case the location is not new...\r\n NewsItemLocation.objects.filter(location=location).delete()\r\n old_niloc_count = NewsItemLocation.objects.count()\r\n i = 0\r\n batch_size = 400\r\n while i < ni_count:\r\n # We don't use intersecting_collection() because we should have cleaned up\r\n # all our geometries by now and it's sloooow ... there could be millions\r\n # of db_newsitem rows.\r\n cursor.execute(\"\"\"\r\n INSERT INTO db_newsitemlocation (news_item_id, location_id)\r\n SELECT ni.id, loc.id FROM db_newsitem ni, db_location loc\r\n WHERE st_intersects(ni.location, loc.location)\r\n AND ni.id >= %s AND ni.id < %s\r\n AND loc.id = %s\r\n \"\"\", (i, i + batch_size, location.id))\r\n connection._commit()\r\n i += batch_size\r\n new_count = NewsItemLocation.objects.count()\r\n logger.info(\"New: %d NewsItemLocations\" % (new_count - old_niloc_count))",
"def update_locations(self):\n # Get list of locations\n locations = self.model.get_locations()\n\n # Upload new locations\n pisspricer = Pisspricer(api)\n pisspricer.upload_new_stores(locations, self.BRAND_ID,\n (self.print_progress, len(locations), 'Get Locations'))",
"def update_restaurant_table(self):\n header = [[\"Restaurant #\", \"Location Name\", \"# of Handles\"]]\n store_list = []\n for restaurant in self.restaurants:\n store_list.append((restaurant.restnum, restaurant.name, int(restaurant.tap_capacity)-len(self.datastore.base_plan)))\n \n self.opti_state_list_model = SimpleTableModel(header + store_list)\n self.opti_state_list.setModel(self.opti_state_list_model)\n\n self.opti_state_list.setSelectionMode(QAbstractItemView.NoSelection)\n\n self.opti_state_list.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the given forecast dictionary contains a numeric value under the provided key. | def is_forecast_number(key, forecast):
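    # Note: the type() check against [float, int] means bool values (a subclass of int) are not treated as numbers.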
return key in forecast and type(forecast[key]) in [float, int] | [
"def missing_value_detector(mvd_key, mvd_dict):\n\tif mvd_dict.get(mvd_key):\n\t\treturn 0\n\telse:\n\t\treturn 1",
"def moreThanOne(dict, key):\n\treturn key in dict and dict[key] > 0",
"def checkandConvertToFloat(dictionary: Dict, key: str) -> float:\n convertedValue = 0.0\n try:\n if key in dictionary and dictionary[key] != \"False\":\n convertedValue = float(dictionary[key].replace(\",\", \"\")) \n except Exception as e:\n print(\"Exception occured @ checkandConvertToFloat\",e)\n finally: \n return convertedValue",
"def contains_200(dictnr):\n contains = False\n for i in dictnr:\n if dictnr[i] == 200:\n contains = True\n print(contains)",
"def has_valueQ(remuneracao_dict):\n return any([s.lower().find('valor') != -1 for s in remuneracao_dict.keys()])",
"def is_key(number):\n res = False\n if is_integer(number):\n if int(number) > 0:\n res = True\n return res",
"def __check_amount(self, data, key, result):\n if any(value in key for value in ['amount', 'euro']):\n result['amount'] = float(data[key])",
"def _has_science_data(data_dict, particle_class):\n return_value = False\n\n # Modified to make this check more efficient\n if len(particle_class.science_parameters) < len(data_dict):\n for key in particle_class.science_parameters:\n value = data_dict.get(key, None)\n if value is not None and not(isnan(float(value))):\n return_value = True\n break\n if particle_class._data_particle_type == 'glider_eng_telemetered':\n log.info(\"GliderParser._has_science_data failed: key=[%s] value=[%s]\", key, value)\n else:\n for key, value in data_dict.iteritems():\n if not (isnan(float(value))) and key in particle_class.science_parameters:\n return_value = True\n break\n if particle_class._data_particle_type == 'glider_eng_telemetered':\n log.info(\"GliderParser._has_science_data failed: key=[%s] value=[%s]\", key, value)\n\n return return_value",
"def contains_double_count(key, value, similarity_dict):\n if value in similarity_dict.keys():\n if key in similarity_dict[value]:\n return True\n return False",
"def data_dict_points(data_dict, feature):\n return len(filter(lambda k: isinstance(data_dict[k][feature],\n (int, float)), data_dict))",
"def checkKey(self, key):\n if key in self.aDict.keys():\n return True\n else:\n return False",
"def anyMoreThanOne(dict, keys):\n\tfor key in keys:\n\t\tif key in dict and dict[key] > 0:\n\t\t\treturn True\n\treturn False",
"def value_in_dict(key, dictionary, allow_false_empty=False):\n is_in = key in dictionary and dictionary[key] is not None\n if not allow_false_empty:\n is_in = is_in and bool(dictionary[key])\n return is_in",
"def compare_param_values(value_1: Any, value_2: Any) -> bool:\n data = {'value_1': value_1, 'value_2': value_2}\n for key, value in data.items():\n if isinstance(value, str):\n try:\n data[key] = float(value)\n except ValueError:\n ...\n if isinstance(value, float) and value.is_integer():\n data[key] = int(value)\n return data['value_1'] == data['value_2']",
"def __contains__(self, key: Any) -> bool:\n hash(key)\n if not isinstance(key, Interval):\n if is_valid_na_for_dtype(key, self.dtype):\n return self.hasnans\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False",
"def __contains__(self, k) :\n return k in self.precision()",
"def contains_value(kv_json, value):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n for key in kv_dict:\n if kv_dict[key] == value: # Found value in dictionary\n return True\n return False\n else:\n print(\"Provide A JSON Key Value String\")",
"def _is_numeric(some_num):\n try:\n float(some_num)\n return True\n except:\n return False",
"def check(self, number: int) -> bool:\n if number in self.d:\n return True\n else:\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Populate weather data tables. | def populate_weather(connection):
metadata = load_metadata('weather')
cursor = connection.cursor()
water_defs = get_water_definitions()
# Check if tables are already populated.
cursor.execute('SELECT count(*) FROM weather')
weather_count = cursor.fetchone()[0]
if weather_count:
print('Weather tables already populated!')
return
print('WEATHER:')
# Darksky data
for dir_name, location in metadata.items():
print(f'\tPopulating weather: "{location["name"]}".')
# Insert location.
cursor.execute(f'''INSERT INTO locations(name, lat, lng)
VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')
location_id = cursor.lastrowid
# Set weather locations for watercourses/aquifers.
for water_body in [d['body'] for d in water_defs.values()]:
if water_body in location:
cursor.execute(f'''UPDATE {water_body}s
SET location_id = {location_id}
WHERE name IN ('{"','".join(location[water_body])}')''')
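                # A location maps to a single water-body type, so stop at the first match.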
break
dir_path = get_data_path('weather', 'raw', dir_name)
for json_file_name in os.listdir(dir_path):
json_path = os.path.join(dir_path, json_file_name)
with open(json_path, 'r', encoding='utf-8') as json_file:
print(f'\t\tPopulating year: {json_file_name[0:-5]}')
year_forecasts = json.load(json_file)
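                # Each yearly file maps a date to that day's forecast, which includes an 'hourly' list.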
for date, date_forecast in year_forecasts.items():
hourly_forecasts = date_forecast['hourly']
if not hourly_forecasts:
print(f'\t\tNo hourly forecasts for {date}!')
continue
daily_forecast = {
'location_id': location_id,
'time': date_forecast['time'],
'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],
'precipitation': 0,
'snow_accumulation': 0
}
                    # Maps daily column name prefixes to Darksky hourly field names; each gets `avg`, `min` and `max` values.
value_names = {
'temperature': 'temperature',
'cloud_cover': 'cloudCover',
'dew_point': 'dewPoint',
'humidity': 'humidity',
'pressure': 'pressure',
'uv_index': 'uvIndex',
'precipitation_probability': 'precipProbability',
'precipitation_intensity': 'precipIntensity'
}
                    # Counters tracking how many times (out of 24 hours) each value appears in the hourly data.
value_counts = {k: 0 for k in value_names.keys()}
for value_name in value_names.keys():
daily_forecast[f'{value_name}_avg'] = 0.0
daily_forecast[f'{value_name}_min'] = float('inf')
daily_forecast[f'{value_name}_max'] = float('-inf')
# Calculate daily forecast values from hourly forecasts.
for hourly_forecast in hourly_forecasts:
for value_name in value_names.keys():
orig_value_name = value_names[value_name]
if is_forecast_number(orig_value_name, hourly_forecast):
daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]
daily_forecast[f'{value_name}_min'] = min(
hourly_forecast[orig_value_name],
daily_forecast[f'{value_name}_min']
)
daily_forecast[f'{value_name}_max'] = max(
hourly_forecast[orig_value_name],
daily_forecast[f'{value_name}_max']
)
value_counts[value_name] += 1
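                        # Accumulate snow separately; approximate rain as intensity weighted by probability.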
if is_forecast_number('precipAccumulation', hourly_forecast) \
and hourly_forecast['precipType'] == 'snow':
daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']
elif is_forecast_number('precipIntensity', hourly_forecast) \
and is_forecast_number('precipProbability', hourly_forecast):
daily_forecast['precipitation'] += \
hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']
for value_name, value_count in value_counts.items():
if value_count:
# Calculate average.
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count
else:
                            # If the value never appeared in the hourly data, store NULL.
daily_forecast[f'{value_name}_avg'] = 'NULL'
daily_forecast[f'{value_name}_min'] = 'NULL'
daily_forecast[f'{value_name}_max'] = 'NULL'
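                    # Column names for the insert are taken directly from the daily_forecast keys.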
cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})
VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')
# IOT data:
for location in SETTINGS['weather_locations_iot']:
print(f'\tPopulating weather: "{location["name"]}".')
# Insert location.
cursor.execute(f'''INSERT INTO locations(name, lat, lng)
VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')
location_id = cursor.lastrowid
# Set weather locations for watercourses/aquifers.
for water_body in [d['body'] for d in water_defs.values()]:
if water_body in location:
cursor.execute(f'''UPDATE {water_body}s
SET location_id = {location_id}
WHERE name IN ('{"', '".join(location[water_body])}')''')
                # Set locations for all stations on the given water body to match its location.
cursor.execute(f'''SELECT id
FROM {water_body}s
WHERE location_id = {location_id}''')
ids = [row[0] for row in cursor.fetchall()]
if len(ids):
cursor.execute(f'''UPDATE {water_body}_stations
SET location_id = {location_id}
WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')
break
file_name = f'''{location['lat']}-{location['lng']}.json'''
json_path = get_data_path('weather', 'raw', file_name)
# If data file doesn't exist, download it first.
if not os.path.isfile(json_path):
            with open(json_path, 'wb') as file:
file.write(read_from_url(location['url'], decode=False))
with open(json_path, 'r', encoding='utf-8') as json_file:
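            # Map field names as spelled in the IOT source data to weather table columns.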
row_names = {
"Sun_duration": "sun_duration",
"CloudCover": "cloud_cover_avg",
"Percipitation": "precipitation",
"New_snow_blanket": "snow_accumulation",
"Snow_blanket": "snow_depth",
"TemperatureAvg": "temperature_avg",
"TemperatureMin": "temperature_min",
"TemperatureMax": "temperature_max"
}
forecasts = json.load(json_file)
for forecast in forecasts:
f = {row_names[k]: forecast[k] for k in row_names.keys()}
f['location_id'] = location_id
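                # LastUpdatedEpoch is in milliseconds; store seconds.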
f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)
cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})
VALUES ({', '.join([str(v) for v in f.values()])})''') | [
"def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')\n aquifer_count = cursor.fetchone()[0]\n\n if watercourse_count and aquifer_count:\n print('Water tables already populated!')\n return\n\n station_data = get_station_data()\n\n for archive in metadata.keys():\n print(f'{archive}-water:'.upper())\n water_body = get_water_definitions(archive)['body']\n\n # 1. Populate watercourses/aquifers:\n stations = {}\n for water_body_name in metadata[archive].keys():\n print(f'\\tPopulating {water_body}: \"{water_body_name}\"')\n cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)\n VALUES (0, '{water_body_name}')''')\n water_body_id = cursor.lastrowid\n\n # 2. Populate watercourse_stations/aquifer_stations:\n for station_id in metadata[archive][water_body_name]['stations']:\n station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])\n\n if station_id in stations:\n # Prefer watercourses/aquifer with more stations\n current_len = len(metadata[archive][water_body_name]['stations'])\n previous_len = len(metadata[archive][stations[station_id]]['stations'])\n\n if current_len < previous_len:\n print(f'\\t\\tStation already exists: {station_id} - \"{station_name}\" (\"{water_body_name}\")')\n continue\n else:\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station: {station_id} - \"{station_name}\" from \"{stations[station_id]}\")')\n\n stations[station_id] = water_body_name\n print(f'\\t\\tPopulating station: {station_id} - \"{station_name}\"')\n\n # Insert station location if station data exists.\n location_id = 0\n station_row = station_data.query(f'ŠIFRA == \"{station_id}\"')\n if not station_row.empty:\n index = station_row.index[0]\n lat = station_row.at[index, 'LAT']\n lng = station_row.at[index, 'LON']\n if not np.isnan(lat) and not np.isnan(lng):\n name = f\"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})\"\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{name}', {lat}, {lng})''')\n location_id = cursor.lastrowid\n\n # Insert station.\n cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)\n VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')\n\n # 3. Populate watercourse_measurements/aquifer_measurements:\n if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],\n station_id):\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station with useless data: {station_id} - \"{station_name}\"')\n\n # Remove empty watercourses/aquifers.\n cursor.execute(f'''SELECT w.id, w.name\n FROM {water_body}s w\n WHERE NOT EXISTS (\n SELECT s.id \n FROM {water_body}_stations s \n WHERE w.id = s.{water_body}_id\n )''')\n\n for row in cursor.fetchall():\n cursor.execute(f'''DELETE \n FROM {water_body}s\n WHERE id = {row[0]}''')\n print(f'\\tRemoved empty {water_body}: \"{row[1]}\"')",
"def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m",
"def update_database(self, data):\n with UseDatabase('weather.sqlite') as cursor:\n sqlite_insert = \"\"\"INSERT OR IGNORE INTO weather\n (sample_date,location,min_temp,max_temp,avg_temp)\n VALUES (?,?,?,?,?);\"\"\"\n for item in data.items():\n cursor.execute(sqlite_insert,\n (item[0], \"Winnipeg,MB\", item[1][\"Max\"],\n item[1][\"Min\"], item[1][\"Mean\"]))",
"def create_database(self):\n test = WeatherScraper(\n 'https://climate.weather.gc.ca/climate_data/daily_data_e.html?'\n 'StationID=27174&timeframe=2&StartYear=1999&'\n 'EndYear=1999&Day=1&Year=2015&Month=11#')\n weather = test.scrape_weather()\n\n if os.path.exists(\"weather.sqlite\"):\n os.remove(\"weather.sqlite\")\n with UseDatabase('weather.sqlite') as cursor:\n execute_str = '''create table weather\n (id integer primary key autoincrement not null,\n sample_date text not null,\n location text not null,\n min_temp real not null,\n max_temp real not null,\n avg_temp real not null,\n UNIQUE (sample_date));'''\n\n cursor.execute(execute_str)\n\n sqlite_insert = \"\"\"INSERT INTO weather\n (sample_date,location,min_temp,max_temp,avg_temp)\n VALUES (?,?,?,?,?);\"\"\"\n\n for item in weather.items():\n cursor.execute(sqlite_insert,\n (item[0], \"Winnipeg,MB\", item[1][\"Max\"],\n item[1][\"Min\"], item[1][\"Mean\"]))",
"def weather_setter(weather_data, wp): \r\n try:\r\n Weather.objects.filter(weather_provider=wp).delete()\r\n except Weather.DoesNotExist:\r\n pass\r\n for weather in weather_data: \r\n obj_wp = Weather.objects.create(weather_provider=weather['weather_provider'])\r\n obj_wp.datetime = weather['datetime']\r\n if 'clouds' in weather:\r\n obj_wp.clouds = int(weather['clouds'])\r\n if 'precipitation' in weather:\r\n obj_wp.precipitation = float(weather['precipitation'])\r\n obj_wp.temperature = int(weather['temperature'])\r\n obj_wp.pressure = int(weather['pressure'])\r\n obj_wp.humidity = int(weather['humidity'])\r\n obj_wp.wind_speed = int(weather['wind_speed'])\r\n obj_wp.wind_direction = int(weather['wind_direction'])\r\n obj_wp.clouds_img = weather['clouds_img']\r\n obj_wp.falls_img = weather['falls_img']\r\n obj_wp.save()",
"def __init__(self):\n self.stations_df = pd.read_csv(WeatherData.data_path + 'all_tx_stations.csv')\n self.weather_df = pd.read_csv(WeatherData.data_path + 'houston_weather.csv')\\\n .merge(self.stations_df, on='station_id', how='inner')\\\n .fillna({'time': 0})\n\n # Convert date to datetime type\n self.weather_df.loc[self.weather_df.time == 2400, 'time'] = 2359\n self.weather_df['date'] = pd.to_datetime(\n self.weather_df.date.astype(str) + ' ' + self.weather_df.time.map(lambda x: f\"{int(x):04d}\"),\n format=\"%Y%m%d %H%M\"\n )\n self.weather_df = self.weather_df.drop(columns='time')\n\n self.weather_df.value = pd.to_numeric(self.weather_df.value)",
"def create_data(self):\n \n if self.table not in metadata.tables.keys():\n return print(f\"{self.table} does not exist\")\n\n if self.table == \"customers\":\n with engine.begin() as conn:\n for _ in range(self.num_records):\n insert_stmt = customers.insert().values(\n first_name = faker.first_name(),\n last_name = faker.last_name(),\n email = faker.email(),\n address = faker.address(),\n dob = faker.date_of_birth(minimum_age=16, maximum_age=60)\n )\n conn.execute(insert_stmt)\n\n if self.table == \"products\":\n with engine.begin() as conn:\n for _ in range(self.num_records):\n insert_stmt = products.insert().values(\n name = random.choice(product_list),\n price = faker.random_int(1,100000) / 100.0\n )\n conn.execute(insert_stmt)\n\n if self.table == \"stores\":\n with engine.begin() as conn:\n for _ in range(self.num_records):\n insert_stmt = stores.insert().values(\n address = faker.address()\n )\n conn.execute(insert_stmt)\n\n if self.table == \"transactions\":\n with engine.begin() as conn:\n for _ in range(self.num_records):\n date_obj = datetime.datetime.now() - datetime.timedelta(days=random.randint(0,30))\n\n insert_stmt = transactions.insert().values(\n transaction_date=date_obj.strftime(\"%Y/%m/%d\"),\n customer_id=random.choice(conn.execute(select([customers.c.customer_id])).fetchall())[0],\n product_id=random.choice(conn.execute(select([products.c.product_id])).fetchall())[0],\n store_id=random.choice(conn.execute(select([stores.c.store_id])).fetchall())[0]\n )\n conn.execute(insert_stmt)",
"def update_weather_for_all_stations():\n\n weather.get_metars(airport_render_config.keys(), logger=LOGGER)",
"def init_tables():\n # drop_table_m_candidates()\n # drop_table_m_qiita_users()\n create_table_m_candidates()\n create_table_m_qiita_users()",
"def set_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tweets(\n id INTEGER PRIMARY KEY,\n tweet_id INTEGER,\n insert_date TEXT,\n created_at TEXT,\n hashtag TEXT)\n \"\"\")\n\n conn.execute(\"\"\"CREATE TABLE tweet_peaks(\n peak_datetime TEXT NOT NULL,\n hashtag TEXT NOT NULL,\n time_frame TEXT,\n mean REAL,\n std REAL,\n sensibility REAL,\n freq_limit REAL,\n qt_tweets INTEGER,\n id TEXT PRIMARY KEY,\n probability REAL);\n \"\"\")",
"def load_up_initial_db(self, date_dict):\n df_tot = []\n for chunk in pd.read_sql_table(self.table, self.disk_engine, chunksize=10000, parse_dates=date_dict):\n df_tot.append(chunk)\n self.df = pd.concat(df_tot)",
"def CalculateAndPutData(weather):\n\n initial = weather[0]\n\n dewpoint_f_high = initial.dewpoint_f\n dewpoint_f_low = initial.dewpoint_f\n dewpoint_c_low = initial.dewpoint_c\n dewpoint_c_high = initial.dewpoint_c\n current_temp_c_high = initial.current_temp_c\n current_temp_c_low = initial.current_temp_c\n current_temp_f_high = initial.current_temp_f\n current_temp_f_low = initial.current_temp_f\n\n for datapoint in weather:\n if datapoint.dewpoint_f > dewpoint_f_high:\n dewpoint_f_high = datapoint.dewpoint_f\n if datapoint.dewpoint_f < dewpoint_f_low:\n dewpoint_f_low = datapoint.dewpoint_f\n if datapoint.dewpoint_c > dewpoint_c_high:\n dewpoint_c_high = datapoint.dewpoint_c\n if datapoint.dewpoint_c < dewpoint_c_low:\n dewpoint_c_low = datapoint.dewpoint_c\n if datapoint.current_temp_c > current_temp_c_high:\n current_temp_c_high = datapoint.current_temp_c\n if datapoint.current_temp_c < current_temp_c_low:\n current_temp_c_low = datapoint.current_temp_c\n if datapoint.current_temp_f > current_temp_f_high:\n current_temp_f_high = datapoint.current_temp_f\n if datapoint.current_temp_f < current_temp_f_low:\n current_temp_f_low = datapoint.current_temp_f\n\n dew = models.DewpointPerDay.all()\n dew.filter('date_time_added >', age_threshold)\n dew_data = dew.get()\n\n if not dew_data:\n new_dew = models.DewpointPerDay()\n\n new_dew.dewpoint_f_high = dewpoint_f_high\n new_dew.dewpoint_f_low = dewpoint_f_low\n new_dew.dewpoint_c_low = dewpoint_c_low\n new_dew.dewpoint_c_high = dewpoint_c_high\n\n new_dew.put()\n\n temp_per_day = models.TemperaturePerDay.all()\n temp_per_day.filter('date_time_added >', age_threshold)\n temp_per_day_data = temp_per_day.get()\n\n if not temp_per_day_data:\n new_temp = models.TemperaturePerDay()\n\n new_temp.current_temp_c_high = current_temp_c_high\n new_temp.current_temp_c_low = current_temp_c_low\n new_temp.current_temp_f_high = current_temp_f_high\n new_temp.current_temp_f_low = current_temp_f_low\n\n new_temp.put()",
"def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()",
"def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)",
"def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist",
"def generate_weather_forecast():\n start_time = datetime.now()\n end_time = start_time + timedelta(days=10)\n df = generate_weather(start_time, end_time)\n return df",
"def read_weather(self):\n print \"Reading weather data from file\",self.datafile\n tab = ascii.read(self.datafile)\n \n # Fix 'T' values in precipitation column, which represent tiny\n # amounts of rain (not measurable)\n TINY_VALUE = '.005' # 0.005 is half the smallest measurable value\n rain = tab['PrecipitationIn']\n wbad = (rain == 'T')\n rain[wbad] = TINY_VALUE\n rain = numpy.array(rain).astype(\"float\")\n\n # Replace string version of precip with float version\n tab['PrecipIn'] = rain\n tab.remove_column('PrecipitationIn')\n\n self.table = tab",
"def _prepare_dataset(self):\n loads = pd.concat(ul.total_experiment_load())\n return [ul.add_temperatures(loads, period) \n for period in ul.experiment_periods()]",
"def init_datasets(self, dataset_names, columns):\n for dataset_name in dataset_names:\n hdf5_dataset_name = self.schema.get(dataset_name)\n if hdf5_dataset_name is None:\n warnings.warn(\"Skipping %s (not in schema)\" % dataset_name)\n else:\n self[dataset_name] = tokio.timeseries.TimeSeries(dataset_name=hdf5_dataset_name,\n start=self.query_start,\n end=self.query_end_plusplus,\n timestep=self.timestep,\n num_columns=len(columns),\n column_names=columns,\n sort_hex=self.sort_hex)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to construct multidimensional dictionaries, e.g. myhash = _makehash(); myhash[1][2] = 4; myhash[2][5][8] = 17 | def _makehash():
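    # Missing keys create another nested defaultdict, so arbitrary-depth assignment works.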
return defaultdict(_makehash) | [
"def hashMap(self,arr):\r\n n = len(arr)\r\n dict1 = {}\r\n i = 1\r\n for i in range(n): \r\n if(i > 0): \r\n key=arr[i]\r\n value=arr[0]\r\n dict1[key] = value\r\n return dict1",
"def _make_hash(self):\n my_hash = {}\n for feature_id, feature in self.feature_dict.items():\n bins = self._get_hash_bins(feature)\n chrom = feature.spanning_segment.chrom\n strand = feature.spanning_segment.strand\n if chrom not in my_hash.keys():\n my_hash[chrom] = {}\n my_hash[chrom][\"+\"] = {}\n my_hash[chrom][\"-\"] = {}\n for b in bins:\n try:\n my_hash[chrom][strand][b].append(feature_id)\n except KeyError:\n my_hash[chrom][strand][b] = [feature_id]\n return my_hash",
"def get_hash_table(self):\n hash_table = {}\n for l in self.X:\n hash_table[l] = {}\n for i in range(self.X[l].shape[0]):\n hash_table[l][get_np_hash(self.X[l][i, :, :])] = np.tile(\n self.y[l][i, :], 2)\n return hash_table",
"def _make_nest(self, array):\n return {'a': array,\n 'b': [jnp.ones_like(array), {'c': jnp.zeros_like(array)}]}",
"def generate_dict(length):\r\n primeDict = {}\r\n index = 2\r\n \r\n while (index < length):\r\n primeDict[index]=True\r\n index = index+1\r\n \r\n return primeDict",
"def _build_hash_table(arr: [str]):\n ht = {}\n for cur_str in arr:\n\n anagram = cur_str[::-1]\n if cur_str in ht.keys():\n # This string is an anagram of some previous\n # Increase anagram count for hash table item\n (original, orig_cnt, anag_cnt) = ht[cur_str]\n ht[cur_str] = (original, orig_cnt, anag_cnt + 1)\n elif anagram in ht.keys():\n # This string equals to some prevoius\n # Increase original count for hash table item\n (original, orig_cnt, anag_cnt) = ht[anagram]\n ht[anagram] = (original, orig_cnt+1, anag_cnt)\n else:\n # This string is new\n ht[anagram] = (cur_str, 1, 0)\n return ht",
"def _make_hashable(items):\n\n def convert(x):\n # Perform any conversions here to make a variable hashable\n if isinstance(x, np.ndarray):\n # Create an sha1 of the data, and throw in a string\n # and the shape.\n return ('__type_np.ndarray', x.shape,\n xxhash.xxh3_128_hexdigest(x))\n elif isinstance(x, (list, tuple)):\n return _make_hashable(x)\n elif isinstance(x, dict):\n return _make_hashable(sorted(x.items()))\n return x\n\n return tuple(map(convert, items))",
"def _get_tile_loc_dict():\n tiles = {}\n tiles[1] = [0,0]\n tiles[2] = [0,1]\n tiles[3] = [0,2]\n tiles[4] = [1,0]\n tiles[5] = [1,1]\n tiles[6] = [1,2]\n tiles[7] = [2,0]\n tiles[8] = [2,1]\n tiles[0] = [2,2]\n\n return tiles",
"def build_hash_dicts(self):\n self.hash_dicts = []\n for i in range(0, self.number_of_signatures):\n self.hash_dicts.append({self.feature_space[idx]: idx\n for idx in xrange(len(self.feature_space))})\n shuffle(self.feature_space)",
"def test_hash_numpy_array2_multi_dimensional_can_not_retrieve_individual_array_item_hashes(self):\n t1 = np.array([[1, 2, 3, 4], [4, 2, 2, 1]], np.int8)\n t1_hash = DeepHashPrep(t1)\n try:\n t1_hash[t1[0]]\n except Exception as e:\n assert str(e).strip(\"'\") == HASH_LOOKUP_ERR_MSG.format(t1[0])",
"def _hash(self) -> None:\r\n # for a unit cube there are 8 possible hashes\r\n # returns the tuple of with all 8 hashes\r\n\r\n self.hashes[\"aaa\"] = P[P[P[self.xi] + self.yi] + self.zi]\r\n self.hashes[\"aab\"] = P[P[P[self.xi] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"aba\"] = P[P[P[self.xi] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"abb\"] = P[P[P[self.xi] + self._inc(self.yi)] + self._inc(self.zi)]\r\n self.hashes[\"baa\"] = P[P[P[self._inc(self.xi)] + self.yi] + self.zi]\r\n self.hashes[\"bab\"] = P[P[P[self._inc(self.xi)] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"bba\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"bbb\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self._inc(self.zi)]",
"def fresh_hash(self):\n _h = defaultdict(lambda: 0)\n very_small = 0.000000000001\n for g in self.groups: _h[g] = { \"total\": very_small, \"var_all\": 0 }\n return _h",
"def create_dictionary():\n d = {}\n for y in range(HEIGHT):\n if (y % 2) != 0:\n pos = (10*y)+10\n else:\n pos =((10*y)-9)+10 \n for x in range(WIDTH):\n xy_tuple = (x,y)\n d[pos] = xy_tuple\n if (y % 2) != 0:\n pos = pos - 1\n else:\n pos = pos + 1\n \n return d",
"def create_hash_functions(self):",
"def create_dict(*args):\n output = {}\n idx = 0\n while idx < len(args):\n output[args[idx + 1]] = args[idx]\n idx += 2\n\n return output",
"def get_dict(size, key_prefix, value_preix):\n my_dict = {}\n for num in range(0, size):\n key = \"%s_%s\" % (key_prefix, num)\n value = \"%s_%s\" % (value_preix, num)\n my_dict[key] = value\n return my_dict",
"def createDictionary(keys, values):\n return dict(map(lambda x, y: (x, y), keys, values))",
"def make_dict(keys,values):\n return dict(zip(keys, values))",
"def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
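A quick aside on the record above: the recursive-defaultdict idiom in its document field is easiest to see with a tiny standalone sketch. The snippet below is illustrative only (the name myhash just mirrors the docstring's own example) and shows how chained assignment works at any depth.

from collections import defaultdict

def _makehash():
    # Every missing key lazily creates another nested defaultdict,
    # so chained indexing never raises KeyError.
    return defaultdict(_makehash)

myhash = _makehash()
myhash[1][2] = 4
myhash[2][5][8] = 17
print(myhash[2][5][8])  # -> 17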
Convert headers of fetched tickers to the same format for convenient data storage in the database. This method assumes that the parser's headers are configured properly (headers_dict); if one of the headers is missing from the config file, an exception is raised. | def convert_headers(self, tickers):
result = _makehash()
for pair_name, fetched_values_dict in list(tickers.items()):
for header, value in list(fetched_values_dict.items()):
result[pair_name][self.config['headers'][header]] = value
return result | [
"def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous column header to it\n if j > 0 and \"Unc\" in hd:\n hd = headers[j - 1] + \" \" + hd\n if \"Unc\" in hd and \"Unc.\" not in hd:\n hd = hd.replace(\"Unc\", \"Unc.\")\n # expand abbreviated headers\n if \"Energy\" in hd and \"Energy Level\" not in hd:\n hd = hd.replace(\"Energy\", \"Energy Level\")\n if \"Par. Elevel\" in hd:\n hd = hd.replace(\"Par. Elevel\", \"Parent Energy Level\")\n if \"Abund.\" in hd:\n hd = hd.replace(\"Abund.\", \"Abundance (%)\")\n if \"Ene.\" in hd:\n hd = hd.replace(\"Ene.\", \"Energy\")\n if \"Int.\" in hd:\n hd = hd.replace(\"Int.\", \"Intensity (%)\")\n if \"Dec\" in hd and \"Decay\" not in hd:\n hd = hd.replace(\"Dec\", \"Decay\")\n if \"Rad\" in hd and \"Radiation\" not in hd:\n hd = hd.replace(\"Rad\", \"Radiation\")\n if \"EP\" in hd:\n hd = hd.replace(\"EP\", \"Endpoint\")\n if \"Mass Exc\" in hd and \"Mass Excess\" not in hd:\n hd = hd.replace(\"Mass Exc\", \"Mass Excess\")\n headers_new.append(hd)\n if len(set(headers_new)) != len(headers_new):\n raise NNDCRequestError(\n \"Duplicate headers after parsing\\n\"\n + f' Original headers: \"{headers}\"\\n'\n + f' Parsed headers: \"{headers_new}\"'\n )\n return headers_new",
"def _unpack_headers(self, headers):\n return dict((k,v[0]) for (k,v) in headers.getAllRawHeaders())",
"def _parse_headers(self):\n\n headers = self._buf.strip().split('\\r\\n')\n for header in headers:\n (key, value) = header.split(': ')\n value = value.strip()\n key = key.lower()\n if key in self._headers:\n if isinstance(self._headers[key], list):\n self._headers[key].append(value)\n else:\n self._headers[key] = [self._headers[key], value]\n else:\n self._headers[key] = value",
"def _format_header(headers=None):\n if not headers:\n headers = {}\n tmp_headers = {}\n for k in headers.keys():\n # if isinstance(headers[k], unicode):\n # headers[k] = convert_utf8(headers[k])\n\n if k.lower().startswith(SELF_DEFINE_HEADER_PREFIX):\n k_lower = k.lower().strip()\n tmp_headers[k_lower] = headers[k]\n else:\n tmp_headers[k.strip()] = headers[k]\n return tmp_headers",
"def _parse_headers(raw_headers: List[str]) -> Dict[str, str]:\n headers: Dict[str, str] = {}\n for header in raw_headers:\n name = header[: header.find(\":\")].strip()\n value = header[header.find(\":\") + 1 :].strip()\n headers[name.lower()] = value\n\n return headers",
"def normalize_headers(self, headers):\n return sorted(\n [(b\"server\", b\"daphne\")]\n + [\n (name.lower(), value.strip())\n for name, value in headers\n if name.lower() not in (b\"server\", b\"transfer-encoding\")\n ]\n )",
"def _parse_headers(headers):\n try:\n return dict(header.split(\":\") for header in headers)\n except:\n raise ValueError(\"Invalid headers %s\" % headers)",
"def _headers(self, headers_dict):\n return Headers(dict((k,[v]) for (k,v) in headers_dict.items()))",
"def get_headers(self, scope):\n headers = {}\n for raw_key, raw_value in scope[\"headers\"]:\n key = raw_key.decode(\"latin-1\")\n value = raw_value.decode(\"latin-1\")\n if key in headers:\n headers[key] = f\"{headers[key]}, {value}\"\n else:\n headers[key] = value\n return headers",
"def _ToTuples(headers):\n all_headers = []\n for line in headers:\n if line[0] in '\\t ':\n if not all_headers:\n logging.warning(\n 'Unexpected response header continuation line [%s]', line)\n continue\n name, value = all_headers.pop()\n value += '\\n ' + line.strip()\n else:\n name_value = RealHttpFetch._GetHeaderNameValue(line)\n if not name_value:\n logging.warning(\n 'Response header in wrong format [%s]', line)\n continue\n name, value = name_value # pylint: disable=unpacking-non-sequence\n all_headers.append((name, value))\n return all_headers",
"def parseColumnHeaders(self):\n \n with open(self.path, \"r\") as f:\n data = [line for line in f]\n headers = data[10]\n del data\n \n #split and convert to lower case\n headers = headers.split()\n headers = [item.lower() for item in headers[:]]\n \n #concatenate date and time headers to single date_time header \n if \"date\" in headers[:]:\n indexPos = headers[:].index(\"date\")\n headers[indexPos]=\"date_time\"\n if \"time\" in headers[:]:\n headers.remove(\"time\")\n \n #fix dual SV column headers\n svIndex = [v for v,item in enumerate(headers) if item =='sv']\n if len(svIndex) == 1:\n headers[svIndex[0]] = 'sv1'\n elif len(svIndex) == 2:\n headers[svIndex[0]] = 'sv2'\n headers[svIndex[1]] = 'sv1'\n \n return headers",
"def __prepare_headers(self, headers, is_json):\n headers.update(self.__headers)\n if is_json:\n headers['Content-Type'] = 'application/json; charset=utf-8'\n\n return headers",
"def updateheader(self, headerlist=[], http_s_obj=None):\n header = {}\n for headerparam in headerlist:\n key_value = headerparam.split(\":\", 1)\n if len(key_value) == 2:\n try:\n key = key_value[0]\n value = key_value[1].strip()\n header.update({key: value})\n if http_s_obj:\n if http_s_obj.header.get(key):\n http_s_obj.header.update({key: value})\n except Exception:\n continue\n return header",
"def process_headers(self, listed_data):\n\t\treturn { val.rstrip().split(\": \")[0]: val.rstrip().split(\": \")[1] for val in listed_data }",
"def _build_headers(self):\n headers = {}\n headers.update(self.data_sources)\n headers.update(self.seasons)\n headers.update(self.region)\n headers.update(self.subregions)\n return headers",
"def _make_headers_df(headers_response):\n\n headers_df = util.make_dataframe(headers_response)\n headers_df = headers_df[\n [\"text\", \"column_index_begin\", \"column_index_end\", \"row_index_begin\", \"row_index_end\", \"cell_id\",\n \"text_normalized\"]]\n return headers_df",
"def _normalize_headers(self):\n self.ncookies=dict((k.lower(), v) for k, v in self.request.cookies.iteritems())\n self.nheaders=dict((k.lower(), v) for k, v in self.request.headers.iteritems())",
"def scrub_headers(headers):\n if isinstance(headers, dict):\n headers = headers.items()\n headers = [\n (parse_header_string(key), parse_header_string(val))\n for (key, val) in headers\n ]\n if not logger_settings.get('redact_sensitive_headers', True):\n return dict(headers)\n if logger_settings.get('reveal_sensitive_prefix', 16) < 0:\n logger_settings['reveal_sensitive_prefix'] = 16\n return {key: safe_value(key, val) for (key, val) in headers}",
"def _parse_hdr(self):\n self.schema[\"hdr\"] = {}\n for line in self.info.split(\"\\n\"):\n for hdr in [\"filename\", \"step\", \"last_update\"]:\n if line.startswith(hdr):\n self.schema[\"hdr\"][hdr] = line.split(\" = \")[-1].strip('\"')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
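For the convert_headers record above, a rough framework-free sketch of the same header-remapping pattern follows; the config shape and the ticker/header names are assumptions invented for illustration, not taken from any real parser config.

from collections import defaultdict

def _makehash():
    return defaultdict(_makehash)

def convert_headers(config, tickers):
    # Remap each fetched header to the canonical column name from the config;
    # an unknown header raises KeyError, as the docstring warns.
    result = _makehash()
    for pair_name, fetched_values in tickers.items():
        for header, value in fetched_values.items():
            result[pair_name][config['headers'][header]] = value
    return result

config = {'headers': {'last': 'last_price', 'vol': 'volume'}}     # hypothetical mapping
tickers = {'BTC_USD': {'last': '42000.5', 'vol': '1234.0'}}       # hypothetical fetched data
print(convert_headers(config, tickers)['BTC_USD']['last_price'])  # -> 42000.5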
Calculate the similarity based on Cosine Similarity between two CTRDMs | def cosinesimilarity_cal(CTRDM1, CTRDM2):
# get number of conditions
n_cons = np.shape(CTRDM1)[0]
# calculate the number of off-diagonal values in the RDM
n = n_cons * (n_cons - 1)
# initialize two vectors to store the off-diagonal values of the two RDMs
v1 = np.zeros([n], dtype=np.float64)
v2 = np.zeros([n], dtype=np.float64)
# assignment
nn = 0
for i in range(n_cons):
for j in range(n_cons):
if i != j:
v1[nn] = CTRDM1[i, j]
v2[nn] = CTRDM2[i, j]
nn = nn + 1
# calculate the Cosine Similarity
V1 = np.mat(v1)
V2 = np.mat(v2)
num = float(V1 * V2.T)
denom = np.linalg.norm(V1) * np.linalg.norm(V2)
cos = num / denom
similarity = 0.5 + 0.5 * cos
return similarity | [
"def _compute_cosine_similarity(d1: np.ndarray, d2: np.ndarray) -> float:\n assert d1.shape == d2.shape\n\n # To avoid dividing by zero. This edge case occurs when both vectors share\n # no common elements\n if (np.linalg.norm(d1) * np.linalg.norm(d2)) == 0:\n return 0\n\n # Computing cosine similarity between both vectors, refer to report for explicit forumla\n similarity = (np.dot(d1, d2)) / (np.linalg.norm(d1) * np.linalg.norm(d2))\n return similarity",
"def similarity(e1, e2):\n tfidf = getTFIDF()\n v1 = tfidf.transform(vectorizer.transform([cleantext(e1)]))\n v2 = tfidf.transform(vectorizer.transform([cleantext(e2)]))\n sim = cosine_similarity(v1,v2)\n #print('similarity({}, {}) = {}'.format(e1,e2,sim))\n return float(sim[0,0])",
"def return_cosine_similarity(self, a, b):\n #return np.dot(a,b.T)/(np.linalg.norm(a)*np.linalg.norm(b))\n return np.dot(a,b.T) / (np.sqrt(np.dot(a,a.T)) * np.sqrt(np.dot(b,b.T)))",
"def cosine_similarity(a, b):\n a = a.flatten()\n b = b.flatten()\n return jnp.dot(a, b) / (jnp.linalg.norm(a) * jnp.linalg.norm(b))",
"def cosine_similarity(arr1,arr2):\n\tcos = np.dot(arr1,arr2)\n\tv1 = np.sum(np.square(arr1))\n\tv2 = np.sum(np.square(arr2))\n\treturn cos/(v1*v2)",
"def cosineSimilarity(string1, string2, idfsDictionary):\n w1 = tfidf(tokenize(string1),idfsDictionary)\n w2 = tfidf(tokenize(string2),idfsDictionary)\n return cossim(w1, w2)",
"def get_cosine_similarity(doc1, doc2):\n count_vectorizer = CountVectorizer(stop_words='english')\n sparse_matrix = count_vectorizer.fit_transform(raw_documents=[doc1, doc2])\n dtm = sparse_matrix.todense()\n df_dtm = pd.DataFrame(data=dtm, \n columns=count_vectorizer.get_feature_names(), \n index=['doc1', 'doc2'])\n similarity_matrix = cosine_similarity(df_dtm, df_dtm)\n similarity_score = round(similarity_matrix[0][1], 6)\n return similarity_score",
"def similarity(d1, d2):\n # clean and tokenize\n d1 = tokenize(clean(d1))\n d2 = tokenize(clean(d2))\n\n # build token index so as to not be operating on strings\n vocab = list(set(d1 + d2))\n v1 = [vocab.index(token) for token in d1]\n v2 = [vocab.index(token) for token in d2]\n\n # calculate token frequency\n seq_length = 1\n f1 = frequency(v1, seq_length)\n f2 = frequency(v2, seq_length)\n\n return round(cosine_similarity(f1, f2), 2)",
"def cosine_similarity(pos1, pos2):\n return dot(pos1, pos2) / (norm(pos1) * norm(pos2))",
"def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:\n return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))",
"def calculate_cosine_similarity(self):\n tfidf_matrix = self.calculate_tfidf()\n\n cosine_similarity = linear_kernel(tfidf_matrix, tfidf_matrix) # Cosine similarity matrix calculation\n\n return cosine_similarity",
"def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])",
"def cosine_similarity(doc1, doc2):\n\n distance = 0\n v1, v2 = doc1.vector, doc2.vector\n\n # Choose the doc with less features to lessen the calculations.\n if len(v2.keys()) < len(v1.keys()):\n v1, v2 = v2, v1\n\n for feature in v1.keys():\n distance += (v1[feature] * v2[feature])\n\n return distance",
"def calculate_cosine_similarity_matrix(v1, v2):\n # Shape: (batch_size, 1, num_sentence_words1, rnn_hidden_size)\n expanded_v1 = tf.expand_dims(v1, 1)\n # Shape: (batch_size, num_sentence_words2, 1, rnn_hidden_size)\n expanded_v2 = tf.expand_dims(v2, 2)\n # Shape: (batch_size, num_sentence_words2, num_sentence_words1)\n cosine_relevancy_matrix = cosine_distance(expanded_v1,expanded_v2)\n return cosine_relevancy_matrix",
"def cosine_similarity(x1, x2, dim=1, eps=1e-8):\r\n w12 = torch.sum(x1 * x2, dim)\r\n w1 = torch.norm(x1, 2, dim)\r\n w2 = torch.norm(x2, 2, dim)\r\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()",
"def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()",
"def cosine_similarity(x1, x2, dim=1, eps=1e-8):\r\n w12 = torch.sum(x1 * x2, dim)\r\n w1 = torch.norm(x1, 2, dim)\r\n w2 = torch.norm(x2, 2, dim)\r\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()",
"def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))",
"def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
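The CTRDM record above vectorizes all off-diagonal cells with an explicit double loop and compares them via np.mat arithmetic; an equivalent illustrative sketch using plain NumPy boolean indexing is shown below, keeping the 0.5 + 0.5*cos rescaling from the original.

import numpy as np

def ctrdm_cosine_similarity(rdm1, rdm2):
    # Collect every off-diagonal entry (the original skips i == j), then
    # compare the two flattened vectors with cosine similarity rescaled to [0, 1].
    mask = ~np.eye(rdm1.shape[0], dtype=bool)
    v1, v2 = rdm1[mask], rdm2[mask]
    cos = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return 0.5 + 0.5 * cos

a = np.array([[0.0, 1.0], [2.0, 0.0]])
b = np.array([[0.0, 1.5], [2.5, 0.0]])
print(ctrdm_cosine_similarity(a, b))  # close to 1 for similarly shaped CTRDMs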
Adds basic_vector to the basic vectors. If there are at least 3 arrays in _basic_vectors, then add a new array to _featureVector. This added array is composed of the basic vector and its first 2 central derivatives. basic_vector must be the array returned by the mfcc. | def build_feature_vector(self, basic_vector):
basic_vector = basic_vector - np.mean(basic_vector)
self._basic_vectors.append(basic_vector)
if len(self._basic_vectors) > 2:
#if there are at least 3 basic vectors we can calculate the central derivative for the vector before this one
first_derivative = (basic_vector - self._basic_vectors[-3])/(2*self.seconds_to_next_vector)
second_derivative = (basic_vector - 2*self._basic_vectors[-2] + self._basic_vectors[-3])/(self.seconds_to_next_vector**2)
feature_vector = np.concatenate((basic_vector, first_derivative, second_derivative))
self._feature_vectors.append(feature_vector) | [
"def feature_vector(features, vector):\n clean_features = set(features)\n new_features_vector = featurize(vector,clean_features)\n return new_features_vector",
"def _add_support_vectors(self, x: np.ndarray, y: np.ndarray) -> None:\n\n n_vectors = x.shape[0]\n\n self.support_vectors = np.vstack([self.support_vectors, x])\n self.alpha = np.append(self.alpha, np.zeros(n_vectors))\n self.target = np.append(self.target, y)\n\n new_kernel_values = self._kernel(x, self.support_vectors)\n\n self.kernel_mx = np.vstack([self.kernel_mx, new_kernel_values[:, :-n_vectors]])\n self.kernel_mx = np.hstack([self.kernel_mx, new_kernel_values.T])\n\n gradient = y - new_kernel_values.dot(self.alpha)\n self.gradient = np.append(self.gradient, gradient)\n\n a = y * self.c\n a[a > 0] = 0\n self.a = np.append(self.a, a)\n\n b = y * self.c\n b[b < 0] = 0\n self.b = np.append(self.b, b)",
"def add_vectors(self, vectors):\n for v in vectors:\n self.add_v(v)",
"def add_vector(*args):\n return _vector.add_vector(*args)",
"def append(self, vector):\n self._vectors.append(Vec2(*vector))",
"def add_feature_vector(self, task):\n tmp = self.create_feature_vector(task)\n self.feature_vectors[task] = tmp",
"def create_feature_vector(self, files=[], name=\"\"):\n\n if( len(files)==0 ):\n return\n\n epsilon = 1e-8\n set = []\n\n #iterating all files obtaining the significant data to compute the feature vectors\n for file in files:\n\n #reading the csv files and keeping the first 3 columns (x,y,time)\n file_data = pd.read_csv(file)\n file_data = file_data.to_numpy()\n data = np.zeros((file_data.shape[0],7))\n data[:,0:3] = file_data[:,0:3]\n\n #computing the other interesting features\n angle = np.arctan(data[:,1]/(data[:,0]+epsilon))\n velocity = np.sqrt( np.square(data[:,1]) + np.square(data[:,0]) )\n log_curvature = np.log10( velocity/(angle+epsilon) )\n acceleration = np.sqrt( np.square(velocity) + np.square(velocity*angle) )\n\n #assigning the new computed features\n data[:,3] = angle\n data[:,4] = velocity\n data[:,5] = log_curvature\n data[:,6] = acceleration\n\n #normalizing the data\n data = self.normalization(data)\n set.append(data)\n\n return set",
"def feature_vector1(self, feature_vector1):\n\n self._feature_vector1 = feature_vector1",
"def mfcc_vector(ceps):\n feat_arr = []\n num_ceps = len(ceps)\n\n feat_arr.append(np.mean(ceps[:], axis=1))\n Vx = np.array(feat_arr)\n # print(len(Vx[0]))\n return Vx",
"def setup_vector_fields(self):\n\n added_fields = []\n for field in self.vector_fields:\n field = self.add_vector_field(field)\n if field is None:\n continue\n added_fields.append(field)\n\n self.vector_fields = tuple(added_fields)",
"def augment_feature_vector(X):\n column_of_ones = np.zeros([len(X), 1]) + 1\n return np.hstack((column_of_ones, X))",
"def add(self, featVect, label):\n if label in self.labelToNum:\n l = self.labelToNum[label]\n else:\n l = len(self.numToLabel)\n self.numToLabel.append(label)\n self.labelToNum[label] = l\n \n self.blocks.append((featVect.reshape((1,featVect.shape[0])).astype(numpy.double),[l]))",
"def AddElementVector(self, *args):\n return _vector.Vector_AddElementVector(self, *args)",
"def __add__(self, vector):\n self._x += vector.get_x()\n self._y += vector.get_y()\n self._z += vector.get_z()",
"def add_vector_field(self, fieldname):\n\n cfields = [f\"{fieldname}_{ax}\" for ax in \"xyz\"]\n exists = all([field in self for field in cfields])\n if not exists:\n return None\n\n for field in cfields:\n self[field][\"vector_fieldname\"] = fieldname\n\n units = self[cfields[0]].get(\"units\", None)\n self.arbor.add_derived_field(\n fieldname, _vector_func, vector_field=True, units=units)\n self.arbor.add_derived_field(\n f\"{fieldname}_magnitude\", _magnitude_func, units=units)\n return fieldname",
"def __iadd__(self, vector):\n\n if isinstance(vector, Vector3D):\n self._vct += vector\n return self\n return NotImplemented",
"def register_vectors(self, vectors):\n\n self.vectors.extend(vectors)",
"def __add__(self, v):\r\n assert len(self) == len(v)\r\n return vector([ x1 + x2 for x1,x2 in zip(self,v)])",
"def add_features(self, new_features):\r\n\t\tfor t, f in zip(self.tracks, new_features):\r\n\t\t\tt.add_features(f.view(1, -1))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
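The delta-feature record above is easier to follow as a free-standing function: given a buffer of mean-removed MFCC frames and a frame spacing dt (a made-up stand-in for seconds_to_next_vector), the first and second central differences are stacked onto the newest frame, mirroring the document field.

import numpy as np

def feature_with_deltas(vectors, dt):
    # vectors: list of mean-removed MFCC frames; at least 3 are needed before
    # the central differences around the middle frame can be formed.
    if len(vectors) < 3:
        return None
    prev2, prev1, cur = vectors[-3], vectors[-2], vectors[-1]
    first = (cur - prev2) / (2 * dt)
    second = (cur - 2 * prev1 + prev2) / (dt ** 2)
    # The static part uses the newest frame, as in the record above.
    return np.concatenate((cur, first, second))

frames = [np.random.rand(13) - 0.5 for _ in range(3)]  # three fake 13-dim MFCC frames
print(feature_with_deltas(frames, dt=0.01).shape)      # -> (39,)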
If there is at least one feature vector, returns the last one; else returns None | def get_last_feature_vectors(self):
if len(self._feature_vectors):
return self._feature_vectors[-1]
return None | [
"def get_feature_vector(name):\n pass",
"def test_get_feature(self):\n # Checking context features\n feature_tensor = self.parser.get_feature(\n self.feature_config.get_feature(\"query_text\"),\n extracted_features=({\"query_text\": tf.zeros((3, 4, 6))}, {}),\n sequence_size=10,\n )\n assert feature_tensor.shape == (1, 3, 4, 6)\n\n # Check missing feature being replaced with default tensor\n feature_tensor = self.parser.get_feature(\n self.feature_config.get_feature(\"query_text\"),\n extracted_features=({}, {}),\n sequence_size=10,\n )\n assert feature_tensor.shape == (1,)\n\n # Checking sequence features\n feature_tensor = self.parser.get_feature(\n self.feature_config.get_feature(\"quality_score\"),\n extracted_features=({}, {\"quality_score\": tf.zeros((3, 4, 6))}),\n sequence_size=10,\n )\n assert feature_tensor.shape == (3, 4, 6)\n\n # Check missing feature being replaced with default tensor\n feature_tensor = self.parser.get_feature(\n self.feature_config.get_feature(\"quality_score\"),\n extracted_features=({}, {}),\n sequence_size=10,\n )\n assert feature_tensor.shape == (10,)",
"def feature_extractor():\n pass",
"def get_features(node_features, no_features):\t\n\tif(no_features==1):\n\t\treturn node_features.embedding\n\tfeatures = np.concatenate((node_features.features))\t\n\tif(no_features==2):\n\t\treturn np.concatenate((node_features.embedding, features))\n\telse:\n\t\twalk = np.concatenate((node_features.walk[0], node_features.walk[1], node_features.walk[2]))\n\t\treturn np.concatenate((node_features.embedding, features, walk))",
"def get_feature(feature_db, point):\n for feature in feature_db:\n if feature.location == point:\n return feature\n return None",
"def get_feature_vector(self, board):\n return self.hot_one(board)\n # return self.get_tesauro_feature_vector(self, board)",
"def _get_features(task, features, model, similarity_strategy=None):\n X = []\n langs = analysis_utils.get_langs_for_task(task)\n for feature in features:\n if feature != \"size\":\n # this is a nested array\n X_feature = analysis_utils.load_lang2vec_vectors(task=task, features=feature)\n if X_feature is None:\n #continue\n return None\n if similarity_strategy != \"-\":\n # We start with similarities to english\n X_feature = [[sim] for sim in analysis_utils.compute_similarities_of_lang_vecs(X_feature, strategy=similarity_strategy)]\n elif feature == \"size\" and model == \"xlmr\":\n # this is an array, we put it in a list\n X_feature = [[size] for size in analysis_utils.xlmr_input_corpus_sizes(langs)]\n elif feature == \"size\" and model == \"mbert\":\n X_feature = [[size] for size in analysis_utils.mbert_input_corpus_sizes(langs)]\n else:\n raise ValueError()\n # we now have a feature vector for a single feature or feature set\n if len(X) == 0:\n X = np.array(X_feature)\n else:\n X = np.concatenate((X,np.array(X_feature)), axis=1)\n if len(X) == 0:\n return None\n return np.array(X, dtype=float)",
"def featureByName(self, name):\n for feature in self.features:\n if feature.name == name:\n return feature\n return None",
"def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features",
"def __getitem__(self, feat):\n # We perform the test for presence explicitly, to maintain a consistent\n # notion of len(self). If we just returned self.features[k], the\n # defaultdict self.features could self.update(k=float()), thus\n # extending self's length by one.\n return self.features[feat] if feat in self.features else 0.",
"def feature_vector(features, vector):\n clean_features = set(features)\n new_features_vector = featurize(vector,clean_features)\n return new_features_vector",
"def get_vector(self, token):\n try:\n idx = self.token_to_idx[token]\n except KeyError:\n print(\"Input token <{}> is not in the model. Will return None type vector\".format(token))\n return None\n return self.embeddings_mat[idx]",
"def feature_set(self) -> Optional[pulumi.Input['OrganizationFeatureSet']]:\n return pulumi.get(self, \"feature_set\")",
"def get_feature(feature):\n dict_features = {str(f): f for f in FEATURES}\n\n if isinstance(feature, str):\n f = dict_features.get(feature)\n if f is None:\n raise ValueError(\n f\"{feature} is not a valid value. \"\n \"Use sorted(pfhedge.features.FEATURES) to get valid options.\"\n )\n else:\n f = feature\n if not isinstance(feature, Feature):\n raise TypeError(f\"{feature} is not an instance of Feature.\")\n return f",
"def features(self):\n if self._classifier is None:\n return None\n\n return self._classifier.most_informative_features()",
"def get_vector(self) -> Optional[List[_Score]]:\n\n if len(self._vector) is 0:\n return None\n else:\n return self._vector",
"def get_base_features(\n self,\n x: np.ndarray,\n time: np.ndarray,\n ) -> Optional[Tensor]:\n\n funcs = {\n \"tsfeatures\": self._get_tsfeatures,\n \"ts2vec\": self._get_ts2vec,\n }\n # get features by given feature types\n features = []\n for ft in self.feature_type:\n if ft in funcs:\n features.append(funcs[ft](x, time))\n if len(features) > 0:\n return torch.cat(features, 1)\n return None",
"def get_next_feature():\n if not TEST_FEATURES:\n return False, None\n feature = TEST_FEATURES.pop(0)\n return feature",
"def get_features(words, vectors):\n result = [vectors.loc[word].values for word in words if word in df_keys.values.reshape(-1)]\n if result:\n return np.stack(result)\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function used for marking the related late check-in requests as deducted. | def action_payslip_done(self):
for recd in self.late_check_in_ids:
recd.state = 'deducted'
return super(PayslipLateCheckIn, self).action_payslip_done() | [
"def checkin(self):\n folio = self.folio_id\n if folio.payment_deposits <= 0:\n raise UserError(_(\"\"\"No record of security deposit found on folio {}\n \"\"\".format(folio.name)))\n if folio.state != 'on_queue':\n raise UserError(_(\n 'Folio {} is not yet to be processed'.format(self.folio_id.name)))\n hours, minutes = decimal_to_time(self.env.user.company_id.checkin_hour)\n can_check_in = datetime.combine(\n date.today(), tm(hours, minutes)) < datetime.now()\n if not can_check_in:\n raise UserError(\n 'Guest(s) cannot be checked in earlier than {}'.format(\n self.env.user.company_id.checkin_hour))\n if self.folio_id.room_id.occupy():\n self.folio_id.write({'state': 'checkin'})",
"def checkin(self, checkin):\n\n self._checkin = checkin",
"def Daysleftverification():\n pass",
"def update_donation():",
"async def mark_not_needed(self) -> \"EntityProgressReporter\":",
"def test_is_gate_overdue__not_started(self):\n self.gate_1.requested_on = None\n self.assertFalse(slo.is_gate_overdue(\n self.gate_1, APPR_FIELDS, DEFAULT_SLO_LIMIT))",
"def borrow_request_new(request):\n if request.user.is_authenticated():\n has_new = False\n if BorrowRequest.objects.filter(recipient=request.user, status=rs.PENDING) .count() > 0:\n has_new = True\n return {'new_br': has_new}\n else:\n return {}",
"def leave_request_decline(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('refuse')\n if res.state == 'refuse':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_refused\"\n )",
"def check_leave_request_holiday(self, cr, uid, att, context=None):\n if att:\n # check have overtime yet?\n att_name = datetime.strptime(att.name, DEFAULT_SERVER_DATETIME_FORMAT)\n param_obj = self.pool.get('ir.config_parameter') \n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_early = att_name + timedelta(minutes = max_early)\n time_late = att_name - timedelta(minutes = max_late)\n \n overtime_obj = self.pool.get('hr.overtime')\n overtime_confirmed_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed'])\n ])\n if overtime_confirmed_ids:\n return False\n \n public_holiday_obj = self.pool.get('trobz.hr.public.holidays')\n public_holiday_ids = public_holiday_obj.search(cr, uid, [('date', '=', att.day_tz), ('state', '=', 'approved')], context=context)\n if public_holiday_ids:\n return True\n sql = '''\n SELECT line.first_date_type, line.first_date, line.last_date_type, line.last_date\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND line.first_date <= '%s' AND line.last_date >= '%s'\n AND h.state = 'validate'\n '''% (att.employee_id.id, att.day_tz, att.day_tz)\n cr.execute(sql)\n for leave in cr.fetchall():\n if att.action == 'sign_out':\n afternoon = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT).hour >= 13\n else:\n afternoon = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT).hour >= 12\n if att.day_tz == leave[1]:\n if leave[0] == 'afternoon' and afternoon:\n return True\n if leave[0] == 'morning' and not afternoon:\n return True\n if leave[0] == 'full':\n return True\n if att.day_tz == leave[3]:\n if leave[2] == 'afternoon' and afternoon:\n return True\n if leave[2] == 'morning' and not afternoon:\n return True\n if leave[2] == 'full':\n return True\n if datetime.strptime(att.day_tz, '%Y-%m-%d') > datetime.strptime(leave[1], '%Y-%m-%d')\\\n and datetime.strptime(att.day_tz, '%Y-%m-%d') < datetime.strptime(leave[3], '%Y-%m-%d'):\n return True\n return False",
"def _check_leave_request(self, cr, uid, request, token, context=None):\n holidays_obj = request.registry['hr.holidays']\n holidays_ids = holidays_obj.search(cr, uid, [\n ('token', '=', token)\n ])\n\n if len(holidays_ids) == 0:\n return request.website.render(\n \"tk_hr_approve_request.leave_request_not_found\"\n )\n\n _id = holidays_ids[0] if len(holidays_ids) else None\n if _id:\n leave_request = holidays_obj.browse(\n cr, uid, _id, context=context\n )\n return leave_request",
"def unmark():\n with _APP.app_context():\n now = datetime.datetime.now()\n users = db.session.query(User)\\\n .filter_by(is_positive = True)\\\n .all() \n\n negatives = []\n for u in users:\n if u.positive_datetime+datetime.timedelta(days=14) <= now:\n negatives.append(u)\n unmark_positive_user(u.id)\n\n logger.info(negatives)",
"def check_indemnizacion_legal(self):\n self.checkbox.check(finiquito_masivo_catalog.CHECKBOX_INDEMNIZACION_LEGAL)",
"def test_is_gate_overdue__already_responded(self):\n self.gate_1.responded_on = datetime.datetime(2023, 6, 12, 12, 30, 0) # Mon\n self.assertFalse(slo.is_gate_overdue(\n self.gate_1, APPR_FIELDS, DEFAULT_SLO_LIMIT))",
"def check_absent_pre_date(self, cr, uid, att, context=None):\n if att:\n # check employee absent pre date\n pre_att_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), \n ('name', '<', att.name), \n ('action', 'in', ('sign_in', 'sign_out'))], \n limit=1)\n param_obj = self.pool.get('ir.config_parameter')\n working_hour_obj = self.pool.get('hr.payroll.working.hour')\n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n trobz_base_obj = self.pool.get('trobz.base')\n att_name = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_late = att_name - timedelta(minutes = max_late)\n \n working_hour_ids=[] #Payroll Working Hours (Only read working PWH, Not Leave or Overtime PWH) \n if not pre_att_ids:\n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_end', '<', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('plan_line_id', '!=', False)\n ], \n context=context)\n else:\n pre_time_early = self.read(cr, uid, pre_att_ids[0], ['name_tz'], context=context)['name_tz']\n time_start_early = datetime.strptime(pre_time_early, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes = max_early)\n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_start', '>', time_start_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '<', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('plan_line_id', '!=', False)\n ], context=context, order='date DESC')\n if not working_hour_ids:\n return False\n else:\n for working in working_hour_obj.browse(cr, uid, working_hour_ids, context=context):\n # check public holiday\n holiday_ids = self.pool.get('trobz.hr.public.holidays').search(cr, uid, [('date','=', working.date)], context=context) \n if holiday_ids:\n return False\n # full\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND line.first_date < '%s' AND line.last_date > '%s'\n AND h.state = 'validate'\n '''% (working.employee_id.id, working.date, working.date)\n cr.execute(sql)\n if cr.fetchall():\n continue\n else:\n sql = False\n expected_start = trobz_base_obj.convert_from_utc_to_current_timezone(cr, uid, working.expected_start, False, DEFAULT_SERVER_DATETIME_FORMAT, False, context=context)\n time_start = expected_start.hour\n expected_end = trobz_base_obj.convert_from_utc_to_current_timezone(cr, uid, working.expected_end, False, DEFAULT_SERVER_DATETIME_FORMAT, False, context=context)\n time_end = expected_end.hour\n # wh afternoon\n if time_start >= 12 and time_end >=12:\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n AND h.state = 'validate'\n AND (line.last_date_type = 'afternoon' OR line.first_date_type = 'afternoon')\n '''% (working.employee_id.id, working.date, working.date)\n # wh morning\n elif time_start < 12 and time_end <= 12:\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n 
AND h.state = 'validate'\n AND (line.last_date_type = 'morning' OR line.first_date_type = 'morning')\n '''% (working.employee_id.id, working.date, working.date)\n \n if sql:\n cr.execute(sql)\n if cr.fetchall():\n continue\n # wh full\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n AND h.state = 'validate'\n AND (line.last_date_type = 'full' OR line.first_date_type = 'full')\n '''% (working.employee_id.id, working.date, working.date)\n cr.execute(sql)\n res = cr.fetchall()\n if res or (time_late >= expected_start and time_late <= expected_end):\n continue\n return True\n return False",
"def awaiting_payment(self):",
"def trip_review_ready(self):\n if self.requests.filter(status__in=[12, 16, 17, ]).exists():\n can_proceed = False\n reason = _(\"some requests are still in the review / recommendation phase.\")\n elif self.current_reviewer and self.current_reviewer.role == 5: # There are different criteria for ADM\n adm_ready_travellers = Traveller.objects.filter(request__trip=self, request__status=14)\n if adm_ready_travellers.exists():\n can_proceed = True\n reason = _(\"By approving this trip, the following travellers will be automatically approved: \") + \\\n f'<ul class=\"mt-3\">{listrify([f\"<li>{t.smart_name}</li>\" for t in adm_ready_travellers.all()], \"\")}</ul>'\n else:\n can_proceed = True\n reason = _(\"All actionable requests have been actioned.\")\n # this is a special case of the below scenario, where no trips are ready for ADM but should still proceed\n elif self.requests.count() == self.requests.filter(Q(status=11) | Q(status=8) | Q(status=10) | Q(status=22)).count():\n can_proceed = True\n reason = _(\"All actionable requests have already been approved.\")\n elif not self.requests.filter(status=14).exists():\n can_proceed = False\n reason = _(\"there are no requests ready for ADM approval.\")\n else:\n can_proceed = True\n reason = _(\"all active requests are ready for ADM review.\")\n return dict(can_proceed=can_proceed, reason=reason)",
"async def submit_annual_leave_claim(request: web.Request, id, body) -> web.Response:\n body = LeaveClaim.from_dict(body)\n return web.Response(status=200)",
"def test_introduce_decline_request(self):\n pass",
"def carry_out(bill_req):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decode next layer protocol. | def _decode_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs
raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute '_decode_next_layer'") | [
"def _decode_next_layer(self, dict_, length=None):\n # make next layer protocol name\n proto = str(self._prot or 'Raw').lower()\n\n # make BytesIO from frame package data\n bytes_ = io.BytesIO(self._file.read(dict_['len']))\n info, protochain = self._import_next_layer(bytes_, length)\n\n # write info and protocol chain into dict\n self._protos = ProtoChain(self._prot, protochain)\n dict_[proto] = info\n dict_['protocols'] = self._protos.chain\n return dict_",
"def Decode(self, encoded_data):",
"def decode(self):\n instr = self.fetch()",
"def _DecodeFn():\n _, decode_dict = self._model.ConstructDecodeGraph(\n input_batch=inp_instance.TpuDequeueBatch())\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return self.decode_nm.Flatten()",
"def decode(self, input, final=False):\r\n raise NotImplementedError",
"def handle_decode(self, encoded_data):\n \n config.COD_PROMPT = config.DEC_PROMPT\n print config.DEC_PROMPT + \" decoding...\"\n \n # while there is another decoder, run each item through the next decoder\n data = encoded_data\n success = False\n for decoder in self.decoder_list:\n current_decoder = decoder()\n success, data = self.recursive_decoder(current_decoder.decode, data)\n if not success:\n break\n print config.DEC_PROMPT + \"%s decoded to '%s'\" % ( current_decoder.name(),data)\n return success, data",
"def _decode(self):\n \n self.version = int(data_to_hex_str(self.packet[0])[2])\n self.header_len = int(data_to_hex_str(self.packet[0])[3]) * 4\n self.type_of_service = data_to_hex_str(self.packet[1:2])\n self.total_len = int(data_to_hex_str(self.packet[2:4]), 16)\n self.id = data_to_hex_str(self.packet[4:6])\n \n #parse the flags fields(reservedbit, don't fragment, more fragment)\n if ((ord(self.packet[6]) & (1 << 7)) != 0):\n self.flags_reservedbit = 1\n else:\n self.flags_reservedbit = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 6)) != 0):\n self.flags_dont_fragment = 1\n else:\n self.flags_dont_fragment = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 5)) != 0):\n self.flags_more_fragment = 1\n else:\n self.flags_more_fragment = 0\n #endof if\n \n #parse the offset field(in packet[6:7]): 00011111 & packet[6] (to filter flags) -->> get packet[6:7] in hex_str\n #tmp = str(31 & ord(self.packet[6]))\n self.fragment_offset = int(data_to_hex_str(self.packet[6:8]), 16)\n if (self.fragment_offset >= (1 << 13)):\n #take away the flags fields: 00011111 11111111 & self.fragment_offset\n self.fragment_offset = self.fragment_offset & ((1 << 13) - 1) \n \n self.TTL = ord(self.packet[8])\n self.protocol = IPPROTO[ord(self.packet[9])]\n self.header_checksum = data_to_hex_str(self.packet[10:12])\n \n self.src = str(ord(self.packet[12])) + '.' + str(ord(self.packet[13])) + '.' + \\\n str(ord(self.packet[14])) + '.' + str(ord(self.packet[15]))\n self.dst = str(ord(self.packet[16])) + '.' + str(ord(self.packet[17])) + '.' + \\\n str(ord(self.packet[18])) + '.' + str(ord(self.packet[19]))\n \n if (self.header_len > 20):\n self.opt_paddings = self.packet[20 : (self.header_len)]",
"def decode(self, z):\n raise NotImplementedError",
"def decode(self, encoded):",
"def _define_decoder(self):\n raise NotImplementedError",
"def decoder(encode, layer, n_step, size, num_layers, drop_frac=0.0, aux_input=None,\n bidirectional=False, **parsed_args):\n decode = RepeatVector(n_step, name='repeat')(encode)\n if aux_input is not None:\n decode = Concatenate()([aux_input, decode])\n\n for i in range(num_layers):\n if drop_frac > 0.0 and i > 0: # skip these for first layer for symmetry\n decode = Dropout(drop_frac, name='drop_decode_{}'.format(i))(decode)\n wrapper = Bidirectional if bidirectional else lambda x: x\n decode = wrapper(layer(size, name='decode_{}'.format(i),\n return_sequences=True))(decode)\n\n decode = TimeDistributed(Dense(1, activation='linear'), name='time_dist')(decode)\n return decode",
"def decode(self, decomposition):\n raise NotImplementedError",
"def _decode_end(_fp):\n return 0",
"def test_decode(self):\n pass # TODO(tlarsen)",
"def decode(self,m):\n raise NotImplementedError('subclasses must override decode()!')",
"def bdecode(f):\n\tbtype = TYPES[f.read(1)]\n\tif btype is not None:\n\t\tf.seek(-1, SEEK_CUR)\n\t\treturn DECODERS[btype](f)\n\telse: #Used in dicts and lists to designate an end\n\t\treturn None",
"def _create_decoding_layers(self, last_encode):\n\n next_decode = last_encode\n\n for l, layer in reversed(list(enumerate(self.layers))):\n\n with tf.name_scope(\"decode-{}\".format(l)):\n\n # Create decoding variables\n dec_w = tf.Variable(tf.transpose(self.encoding_w_[l].initialized_value()))\n dec_b = tf.Variable(tf.constant(0.1, shape=[dec_w.get_shape().dims[1].value]))\n self.decoding_w.append(dec_w)\n self.decoding_b.append(dec_b)\n\n y_act = tf.matmul(next_decode, dec_w) + dec_b\n\n if self.finetune_act_func == 'sigmoid':\n layer_y = tf.nn.sigmoid(y_act)\n\n elif self.finetune_act_func == 'tanh':\n layer_y = tf.nn.tanh(y_act)\n\n elif self.finetune_act_func == 'relu':\n layer_y = tf.nn.relu(y_act)\n\n else:\n layer_y = None\n\n # the input to the next layer is the output of this layer\n next_decode = tf.nn.dropout(layer_y, self.keep_prob)\n\n self.layer_nodes.append(next_decode)\n\n self.reconstruction = next_decode",
"def decode(cls, data: bytes) -> \"KNXDPacket\":\n return cls(KNXDPacketTypes(int.from_bytes(data[0:2], byteorder='big')), data[2:])",
"def greedy_decode(self, z):\r\n\r\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Import next layer extractor. | def _import_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs
raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute '_import_next_layer'") | [
"def _next_layer(self, layer):\n return self.layers[layer + 1]",
"def _import_next_layer(self, file_, length):\n if self._prot == 'Ethernet':\n from .link import Ethernet as Protocol\n elif self._prot == 'IPv4':\n from .internet import IPv4 as Protocol\n elif self._prot == 'IPv6':\n from .internet import IPv6 as Protocol\n else:\n data = file_.read(*[length]) or None\n return data, None\n next_ = Protocol(file_, length)\n return next_.info, next_.protochain",
"def add(self, layer):\n self.extractor.add(layer)",
"def set_next_layer(self, layer):\n self._next_layer = layer",
"def _imported_functions(self):\n\n i = 0\n while 1:\n thunk = obj.Object('_IMAGE_THUNK_DATA',\n offset = self.obj_parent.DllBase + self.OriginalFirstThunk +\n i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),\n vm = self.obj_native_vm)\n\n # We've reached the end when the element is zero \n if thunk == None or thunk.AddressOfData == 0:\n break\n\n o = obj.NoneObject(\"Ordinal not accessible?\")\n n = obj.NoneObject(\"Imported by ordinal?\")\n f = obj.NoneObject(\"FirstThunk not accessible\")\n\n # If the highest bit (32 for x86 and 64 for x64) is set, the function is \n # imported by ordinal and the lowest 16-bits contain the ordinal value. \n # Otherwise, the lowest bits (0-31 for x86 and 0-63 for x64) contain an \n # RVA to an _IMAGE_IMPORT_BY_NAME struct. \n if thunk.OrdinalBit == 1:\n o = thunk.Ordinal & 0xFFFF\n else:\n iibn = obj.Object(\"_IMAGE_IMPORT_BY_NAME\",\n offset = self.obj_parent.DllBase +\n thunk.AddressOfData,\n vm = self.obj_native_vm)\n o = iibn.Hint\n n = iibn.Name\n\n # See if the import is bound (i.e. resolved)\n first_thunk = obj.Object('_IMAGE_THUNK_DATA',\n offset = self.obj_parent.DllBase + self.FirstThunk +\n i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),\n vm = self.obj_native_vm)\n if first_thunk:\n f = first_thunk.Function.v()\n\n yield o, f, str(n or '')\n i += 1",
"def updateLayers(self):\n\t\tself.layers = self.extractLayers()",
"def import_ops(self):\n if self.is_training:\n self.lr = tf.get_collection_ref(\"lr\")[0]\n self.new_lr = tf.get_collection_ref(\"new_lr\")[0]\n self.lr_update = tf.get_collection_ref(\"lr_update\")[0]\n\n self.cost = tf.get_collection_ref(util.with_prefix(self.name, \"cost\"))[0]\n self.initial_state = util.import_state_tuples(\n self.initial_state, self.initial_state_name, self.name)\n self.final_state = util.import_state_tuples(\n self.final_state, self.final_state_name, self.name)",
"def get_feature_extractor():\n net = alexnet(pretrained=False)\n net.load_state_dict(model_zoo.load_url(model_urls['alexnet'], \n model_dir=model_urls['local']))\n\n feature_extractor = nn.Sequential(*list(net.classifier.children())[:-1])\n net.classifier = feature_extractor\n net.eval()\n return net",
"def importStep(fileName):\n\n # Now read and return the shape\n reader = STEPControl_Reader()\n readStatus = reader.ReadFile(fileName)\n if readStatus != OCC.Core.IFSelect.IFSelect_RetDone:\n raise ValueError(\"STEP File could not be loaded\")\n for i in range(reader.NbRootsForTransfer()):\n reader.TransferRoot(i + 1)\n\n occ_shapes = []\n for i in range(reader.NbShapes()):\n occ_shapes.append(reader.Shape(i + 1))\n\n # Make sure that we extract all the solids\n solids = []\n for shape in occ_shapes:\n solids.append(Shape.cast(shape))\n\n return cq.Workplane(\"XY\").newObject(solids)",
"def _load_exr_layers(self):\n # show a progress busy indicator\n self.msg_win.show_msg(\"Loading\",\n \"Loading {0} Exr Layers, Please Wait.\".format(len(self.exr_image.layer_names())))\n QtWidgets.QApplication.processEvents()\n\n # load the layers as images\n errors = self.exr_image.load_layers()\n # done loading hide window\n self.msg_win.msg_box.hide()\n # display any errors\n if errors:\n self.msg_win.show_error_msg(\"Exr Channel Error\", ', '.join(errors))\n return\n\n # build layer menu - clear first in case loading a new image\n self._build_layer_menu()\n # show the rgb of the image, pass True to tell it to reset view to fit image\n self.display_layer(True)\n # set focus to image\n self.setFocus()",
"def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()",
"def add_layers(self, layers):\n\n existing_layers = self.layers\n assert len(existing_layers) > 0\n for layer in layers:\n assert layer.get_mlp() is None\n layer.set_mlp(self)\n layer.set_input_space(existing_layers[-1].get_output_space())\n existing_layers.append(layer)\n assert layer.layer_name not in self.layer_names\n self.layer_names.add(layer.layer_name)",
"def import_forward(self):\n self.import_property('OG')\n self.import_property('IBU')\n self.import_property('ABV')\n self.import_property('SRM')",
"def _CreateLayerOut(self,aFN,explodeD):\n #Check which file this is\n layerok = False\n for l in explodeD:\n if explodeD[l]['params']['pattern'] in aFN:\n layerok = l\n break \n if layerok:\n #print (explodeD[l]['layer'].FPN)\n if explodeD[l]['layer']._Exists():\n #session._InsertSceneBand(explodeD[l]['layer'])\n return False\n else:\n #print ('exploding',explodeD[l]['layer'].FN)\n\n return explodeD[l]['layer'] \n else:\n pass\n #print 'WARNING %s not extracted' %(aFN)",
"def add_layer(self, full_path, delimiter=\"::\"):\n if self.find_layer_from_fullpath(full_path):\n return self.find_layer_from_fullpath(full_path)\n else:\n # Cumulative List Split\n # Using accumulate() + join()\n temp = full_path.split(delimiter)\n res = list(accumulate(temp, lambda x, y: delimiter.join([x, y])))\n parent_layer = Layer()\n for part in res:\n if self.find_layer_from_fullpath(part):\n parent_layer = self.find_layer_from_fullpath(part)\n continue\n else:\n *parent_name, name = part.split(delimiter)\n _layer = Layer() # Create Layer\n _layer.Name = name # Set Layer Name\n if parent_layer:\n _layer.ParentLayerId = parent_layer.Id # Set parent Id\n self._file3dm.Layers.Add(_layer) # Add Layer\n _layer = self._file3dm.Layers.FindName(name, parent_layer.Id)\n\n # set parent layer to this layer (for next iter)\n parent_layer = _layer\n # Sets Layer as class attr\n setattr(UmiLayers, _layer.FullPath, _layer)\n return _layer",
"def get_layer(self, i):\n if i == 0:\n print('No Layer object for the input layer.')\n return None\n elif i < 0:\n return self.layers[i]\n else:\n return self.layers[i-1]",
"def next_layer_in_menu(self):\n menu_size = int(self.layer_list_menu.count())\n if menu_size > 0:\n next_layer_ind = (self.layer_list_menu.currentIndex() + 1) % menu_size\n self.layer_list_menu.setCurrentIndex(next_layer_ind)",
"def extractML(self):\n print(\"extracting...\")\n multi=MultiLayer(self.layerStruct,LayerNTList([]),LinkNTList([]))\n for i in range(self.layers.length()):\n nodel = []\n lay=self.layers.giveLayer(i).giveLayerLabel() \n print(\"layer\",lay)\n for j in self.layers.giveLayer(i).giveNodesT().giveListOfNodes():#on sélectionne les noeuds apparaissant à t\n nodel.append(Node(j.giveNode()))\n nu=NodeList(nodel)\n multi.addLayer(LayerNT(self.layerStruct,lay,NodeList(nodel)))\n for j in self.em.giveListOfLinks():\n tab=j.giveLabel()\n multi.addLink(LinkNT(Node(tab[0]),tab[2],Node(tab[1]),tab[3])) \n return(multi)",
"def set_next(self, next_layer):\n self.next_layer = next_layer"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
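The two records above (decode next layer / import next layer) describe the hook pattern in which a base protocol class leaves layer chaining unimplemented and concrete layers hand their remaining payload to the next decoder. The minimal sketch below is framework-free and entirely invented for illustration (DemoLayer, RawPayload and the one-byte type field are not part of any real library).

class RawPayload:
    # Fallback "protocol" that keeps undecoded bytes as-is.
    def __init__(self, data):
        self.info = {'raw': data}

class DemoLayer:
    # Hypothetical layer: read a one-byte type field, then let the next
    # decoder (looked up from that type) consume the rest of the payload.
    decoders = {}  # type byte -> callable(bytes) returning an object with .info

    def __init__(self, data):
        self.info = {'type': data[0]}
        self.info['payload'] = self._decode_next_layer(data[0], data[1:])

    def _decode_next_layer(self, proto, payload):
        next_protocol = self._import_next_layer(proto)
        return next_protocol(payload).info

    def _import_next_layer(self, proto):
        return self.decoders.get(proto, RawPayload)

print(DemoLayer(bytes([0x01, 0x41, 0x42])).info)
# -> {'type': 1, 'payload': {'raw': b'AB'}}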
Build QA data dict from the nights | def build_data(self):
from desiutil.io import combine_dicts
# Loop on exposures
odict = {}
for qanight in self.qa_nights:
for qaexp in qanight.qa_exps:
# Get the exposure dict
idict = write_qa_exposure('foo', qaexp, ret_dict=True)
odict = combine_dicts(odict, idict)
# Finish
self.data = odict | [
"def champion_features():\n\n rv = {}\n with open('../data/all_weeks.csv') as handle:\n reader = csv.DictReader(handle, ['Week', 'Name', 'Difficulty', 'Since', 'Times', 'RiotMeta'])\n\n for line in reader:\n name = line['Name']\n difficulty = line['Difficulty']\n since = line['Since']\n times = int(line['Times'])\n\n champ_meta = line['RiotMeta'].split(':')\n\n week_num = int(line['Week'])\n\n if week_num not in rv:\n rv[week_num] = {}\n for meta in METAS:\n rv[week_num][meta] = {}\n rv[week_num][None] = {}\n\n for meta in champ_meta:\n rv[week_num][meta][name] = {\n 'name': name,\n 'difficulty': difficulty,\n 'since': since,\n 'times': times,\n }\n\n #I shoud feel dirty for copy pasta.\n rv[week_num][None][name] = {\n 'name': name,\n 'difficulty': difficulty,\n 'since': since,\n 'times': times,\n }\n\n return rv",
"def innovation_investment(self):\n answers = {\n \"1310\": self.yes_no_question(\"1310\"),\n \"2675\": self.checkbox_question(\"2675\"),\n \"2676\": self.checkbox_question(\"2676\"),\n \"2677\": self.checkbox_question(\"2677\"),\n \"1410\": self.round_and_divide_by_one_thousand(self.get_qcode(\"1410\")),\n \"1320\": self.yes_no_question(\"1320\"),\n \"1420\": self.round_and_divide_by_one_thousand(self.get_qcode(\"1420\")),\n \"1330\": self.yes_no_question(\"1330\"),\n \"1331\": self.checkbox_question(\"1331\"),\n \"1332\": self.checkbox_question(\"1332\"),\n \"1333\": self.checkbox_question(\"1333\"),\n \"1430\": self.round_and_divide_by_one_thousand(self.get_qcode(\"1430\")),\n \"1340\": self.yes_no_question(\"1340\"),\n \"1440\": self.round_and_divide_by_one_thousand(self.get_qcode(\"1440\")),\n \"1350\": self.yes_no_question(\"1350\"),\n \"1450\": self.round_and_divide_by_one_thousand(self.get_qcode(\"1450\")),\n \"1360\": self.yes_no_question(\"1360\"),\n \"1460\": self.round_and_divide_by_one_thousand(self.get_qcode(\"1460\")),\n \"1370\": self.yes_no_question(\"1370\"),\n \"1371\": self.checkbox_question(\"1371\"),\n \"1372\": self.checkbox_question(\"1372\"),\n \"1373\": self.checkbox_question(\"1373\"),\n \"1374\": self.checkbox_question(\"1374\"),\n \"1470\": self.round_and_divide_by_one_thousand(self.get_qcode(\"1470\")),\n }\n\n return answers",
"def prepare_additional_data(self):\n self.websites_list.append(self.main_website)\n self.websites_list = WebsiteMeasurement.get_sorted_ranking(\n self.websites_list\n )\n result_data = dict()\n result_data['comparison_result'] = self.get_comparison_result()\n result_data['comparison_date'] = str(datetime.datetime.now())\n\n self.result_data = result_data",
"def nutrition_data(self):\n data = dict()\n\n # get required data, generally from nutrient fields but some special cases\n data['cost'] = self.best_price\n data['grams'] = settings.STANDARD_WEIGHT # data stored per KG or 100g\n if self.serving:\n data['grams_serve'] = self.serving # optional serving size\n for k in settings.NUTRITION_DATA_ITEMS_BASIC:\n data[k] = getattr(self,k)\n\n return add_nutrition_ratios(data) # generate ratios and values from above",
"def create_populate_url_dict():\n url_dict = {}\n url_dict[\n \"The Village\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/spectrum/village-at-irvine-spectrum/availability.html\"\n url_dict[\n \"Oak Glen\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/oak-creek/oak-glen/availability.html\"\n url_dict[\n \"Cypress Village\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/cypress-village/communities/availability.html\"\n url_dict[\n \"Avella\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/cypress-village/avella/availability.html\"\n url_dict[\n \"Quail Hill\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/quail-hill/communities/availability.html\"\n url_dict[\n \"The Park\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/spectrum/the-park/availability.html\"\n url_dict[\n \"Woodbury Court\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/woodbury/woodbury/woodbury-court/availability.html\"\n url_dict[\n \"Centerpointe\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/spectrum/centerpointe/availability.html\"\n url_dict[\n \"Westview\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/spectrum/westview/availability.html\"\n url_dict[\n \"Los Olivos\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/spectrum/los-olivos/availability.html\"\n url_dict[\n \"Promenade\"\n ] = \"https://www.irvinecompanyapartments.com/locations/orange-county/irvine/spectrum/promenade-at-spectrum/availability.html\"\n\n return url_dict",
"def create_country_q(self):\n # recreate a new ordered dict\n new_dict = OrderedDict()\n\n for k in self.dict_questions:\n # First check if there is country_specific condition.\n # In that case, need to create a question for each possibility to be able to show the different answers\n # As limesurvey does not allow the creation of conditions for questions.\n if self.dict_questions[k]['country_specific'] in self.list_bool and self.dict_questions[k]['answer_format'].lower() in ['one choice', 'y/n/na', 'multiple choices']:\n for country in self.create_country_list(self.dict_questions[k]):\n new_question = self.dict_questions[k].copy()\n # check if that country has a specific answer file\n if new_question['answer_file'] != '':\n outfile = os.path.join(self.year, \"answers\", 'countries', country, \"{}.csv\".format(new_question['answer_file']))\n try:\n open(outfile)\n new_question['answer_file'] = outfile\n\n # If the specific file is not found it means that country does not need a specific version\n # of the question. Then keep the original question.\n except IOError:\n outfile = os.path.join(self.year, 'answers', \"{}.csv\".format(new_question['answer_file']))\n new_question['answer_file'] = os.path.join(self.year, 'answers', \"{}.csv\".format(new_question['answer_file']))\n try:\n open(outfile)\n new_question['answer_file'] = outfile\n # If there is no file in the answer folder. It means that for that country (more likely world), the question should be FREETEXT\n except IOError:\n new_question['answer_file'] = ''\n new_question['answer_format'] = 'FREETEXT'\n new_question['other'] = ''\n new_question['country_specific'] = ''\n\n new_code = '{}q{}'.format(k, country)\n new_question['country_specific'] = ''\n for c in self.dict_countries.keys():\n new_question[c] = ''\n new_question['world'] = ''\n new_question[country] = 'Y'\n new_dict[new_code] = new_question\n\n else:\n new_dict[k] = self.dict_questions[k].copy()\n if new_dict[k]['answer_file'] != '':\n new_dict[k]['answer_file'] = os.path.join(self.year, 'answers', \"{}.csv\".format(new_dict[k]['answer_file']))\n self.dict_questions = new_dict.copy()",
"def build_meta_stock():\n stock = query('t8430', {'gubun':'0'}).get('t8430OutBlock', [])\n return dict(zip(\n map(lambda s: s['shcode'], stock),\n stock\n ))",
"def createQ(self, state):\n\n ########### \n ## TO DO ##\n ###########\n # When learning, check if the 'state' is not in the Q-table\n # If it is not, create a new dictionary for that state\n # Then, for each action available, set the initial Q-value to 0.0\n \n if self.learning != False:\n if not self.Q.has_key(state):\n self.Q[state] = {}\n for act in self.valid_actions:\n self.Q[state][act] = 0\n return",
"def get_2008_data(hist_dist_std, project_root, weeks_before_election):\n poll_dir = project_root + '/obama2008/'\n\n # first lets find the first day of our polling.\n election_date = datetime.datetime(2008, 11, 4)\n date1 = election_date\n for state in states:\n with open(os.path.join(poll_dir, state + '.txt')) as file:\n for line in file:\n w = line.split('\\t')\n poll_datestring = w[1].split('-')[1].split()[0].split('/')\n poll_date = datetime.datetime(2008, int(poll_datestring[0]), int(poll_datestring[1]))\n date1 = poll_date if poll_date < date1 else date1\n\n n_days_total = (election_date - date1).days + 1\n\n # let's create a mapping from day to week (weeks start on Tuesday because election day is a Tuesday)\n week1_start = date1 - datetime.timedelta(date1.weekday() - 1) # the first Tuesday of the first week we poll\n day_to_week_map = []\n for i in range(0, n_days_total):\n day_to_week_map.append(((date1 + datetime.timedelta(days=i)) - week1_start).days // 7 + 1)\n n_wks_total = max(day_to_week_map)\n\n # now let's get the poll data.\n polls_n_voters = []\n polls_n_democratic = []\n polls_n_day = []\n polls_n_wk = []\n polls_state = []\n for state_numb, state in enumerate(states):\n with open(os.path.join(poll_dir, state + '.txt')) as file:\n for line in file:\n w = line.split('\\t')\n if len(w) < 6:\n # print('skipping ' + line)\n continue\n idx_date = 1\n idx_n_voters = 2\n idx_dem = 4 if len(w) == 6 else 5\n idx_repub = 3 if len(w) == 6 else 4\n poll_datestring = w[idx_date].split('-')[1].split()[0].split('/')\n poll_day = (datetime.datetime(2008, int(poll_datestring[0]), int(poll_datestring[1])) - date1).days + 1\n # some polls were taken after election day, so we can throw them out.\n if poll_day >= n_days_total:\n # print('skipping ' + line)\n continue\n # sometimes we only want to look at polls up till a certain date:\n if day_to_week_map[poll_day] > n_wks_total - weeks_before_election:\n # print('skipping ' + line)\n continue\n voter_count_str = w[idx_n_voters].split()[0]\n if not voter_count_str.isnumeric():\n # print('skipping ' + line)\n continue\n voter_count = int(voter_count_str)\n polls_n_voters.append(voter_count)\n polls_n_democratic.append(\n math.ceil(voter_count * float(w[idx_dem]) / (float(w[idx_dem]) + float(w[idx_repub]))))\n polls_n_day.append(poll_day)\n polls_n_wk.append(day_to_week_map[poll_day - 1])\n # print('added poll for ' + state + ' - state # {}'.format(state_numb))\n polls_state.append(state_numb + 1)\n\n hist_dist, hist_dist_precision = create_historical_predictions_for_2008(hist_dist_std, project_root)\n\n return {'polls_n_voters': polls_n_voters,\n 'polls_n_democratic': polls_n_democratic,\n 'polls_n_day': polls_n_day,\n 'polls_n_wk': polls_n_wk,\n 'polls_state': polls_state,\n 'n_days_total': int(n_days_total),\n 'day_to_week_map': day_to_week_map,\n 'n_wks_total': n_wks_total,\n 'n_polls': len(polls_n_day),\n 'n_states': 50,\n 'hist_dist': hist_dist,\n 'hist_dist_precision': hist_dist_precision}",
"def get_q_string_dat(self):\n\n state_pk = [i for i in self.form.cleaned_data['states'] if i != '1'] # pk=1 is blank\n states = State.objects.filter(pk__in=state_pk).values()\n states_out = str([i['state'] for i in states]).replace(\" \", '').replace(\"'\", '').strip('[]')\n\n region_pk = [i for i in self.form.cleaned_data['region'] if i != '1'] # pk=1 is blank\n regions = Region.objects.filter(pk__in=region_pk).values()\n regions_out = str([i['region'] for i in regions]).replace(\" \", '').replace(\"'\", '').strip('[]')\n\n event_pk = self.form.cleaned_data['event']\n events = EventType.objects.filter(pk__in=event_pk).values()\n events_out = str([i['event'].replace(' ', '%20') for i in events]).replace(\" \", '').replace(\"'\", '').strip('[]')\n\n distance_out = self.form.cleaned_data['distance']\n zip_cd = self.form.cleaned_data.get('zip')\n location_out = str(get_location(zip_cd))\n\n self.cadence = int(self.form.cleaned_data.get('dstr_cad'))\n\n return {'states_out': states_out, 'regions_out': regions_out, 'events_out': events_out,\n 'location_out': location_out, 'distance_out': distance_out}",
"def __init_q_values(self, game_state):\n encoded_game_state = self.__encode_state(game_state)\n if encoded_game_state in self.q_values:\n return\n self.q_values[encoded_game_state] = {}\n for free_seat in self.__get_free_seats(game_state):\n self.q_values[encoded_game_state][free_seat] = (self.INITIAL_STATE_VALUE, 0)",
"def dict_trial(set_trials, data):\r\n test = ''\r\n for key, value in set_trials.items():\r\n for j in data[\"Clinical_trials\"]:\r\n if j[\"journal\"] == key:\r\n test = test + j[\"scientific_title\"] \r\n else:\r\n continue\r\n set_trials[key] = test \r\n test = '' \r\n return(set_trials)",
"def __init__(self, name, gender, region, division, homeField, numWeeks): \n self.name = name\n self.gender = gender\n self.region = region\n self.division = division \n self.homeField = homeField\n self.games = []\n self.numHomeGames = 0\n self.numAwayGames = 0\n self.weekly_availability = []\n for i in range(0,numWeeks):\n self.weekly_availability.append(True)\n self.homeAwayRecords = []\n for i in range(0,numWeeks):\n self.homeAwayRecords.append('') # entries will be filled with 'home' or 'away', for the corresponding week",
"def fill_testing_dates(self):\r\n \r\n now = datetime.now()\r\n month = now.strftime('%m')\r\n year = now.year \r\n most_recent_date = '{}-{}-01'.format(year, month)\r\n self.testing_dates[1] = {'cv_start': '1972-01-01', \r\n 'cv_end': '1975-12-01', \r\n 'pred_start': '1976-01-01',\r\n 'pred_end': '1981-07-01'}\r\n self.testing_dates[2] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1981-07-01', \r\n 'pred_start': '1981-08-01',\r\n 'pred_end': '1983-07-01'}\r\n self.testing_dates[3] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1983-07-01', \r\n 'pred_start': '1983-08-01',\r\n 'pred_end': '1992-12-01'}\r\n self.testing_dates[4] = {'cv_start': '1983-08-01', \r\n 'cv_end': '1992-12-01', \r\n 'pred_start': '1993-01-01',\r\n 'pred_end': '2003-07-01'}\r\n self.testing_dates[5] = {'cv_start': '1993-01-01', \r\n 'cv_end': '2003-07-01', \r\n 'pred_start': '2003-08-01',\r\n 'pred_end': '2010-09-01'}\r\n self.testing_dates[6] = {'cv_start': '2003-08-01', \r\n 'cv_end': '2010-09-01', \r\n 'pred_start': '2010-10-01',\r\n 'pred_end': '2021-07-01'}\r\n self.testing_dates[7] = {'cv_start': '2010-10-01', \r\n 'cv_end': '2021-07-01', \r\n 'pred_start': '2021-08-01',\r\n 'pred_end': most_recent_date}",
"def build_worker_profiles(raw_data):\n res = defaultdict(list)\n\n metadata = [\"_trust\", \"_ip\", \"_channel\"]\n ans_choices = [\"yes_direct\", \"yes_indirect\", \"no_relation\", \"ner_mistake\"]\n\n for worker_id, group in raw_data.groupby(\"_worker_id\"):\n test_resp = group.query(\"_golden\")\n work_resp = group.query(\"~_golden\")\n\n res[\"worker_id\"].append(worker_id)\n res[\"test_ques_seen\"].append(len(test_resp[\"uniq_id\"].unique()))\n res[\"work_units_seen\"].append(len(work_resp[\"uniq_id\"].unique()))\n\n res[\"country\"].append(get_country_name(test_resp[\"_country\"].iloc[0]))\n\n for metadata_col in metadata:\n res[metadata_col.lstrip(\"_\")].append(test_resp[metadata_col].iloc[0])\n\n for work_type, resp_data in zip([\"test\", \"work\"], [test_resp, work_resp]):\n time_series = determine_time_taken(resp_data) # time per page\n time_series /= WORK_UNITS_PER_PAGE # time per work unit\n\n stats = time_stats(time_series)\n for i, name in enumerate([\"min\", \"median\", \"max\"]):\n res[\"{0}_{1}_time_per_unit\".format(work_type, name)].append(stats[i])\n\n # look at the response distributions\n for ans_choice in ans_choices:\n temp = resp_data.query(\"verify_relationship == '{0}'\".format(ans_choice))\n res[\"{0}_{1}\".format(work_type, ans_choice)].append(len(temp[\"uniq_id\"].unique()))\n\n return pd.DataFrame(res)",
"def initDictionary(bands):\r\n for x in bands:\r\n d[\"{}\".format(x)] = {ProdCost: [], AlbumSales: []}",
"def process_innovation(self):\n answers = {\n \"0900\": self.yes_no_question(\"0900\"),\n \"1010\": self.checkbox_question(\"1010\"),\n \"1020\": self.checkbox_question(\"1020\"),\n }\n\n return answers",
"def qasmCircuitResults(self):\n returnedDictionary={}\n self.circutDrawing = self.draw()\n self.blochSpheres=self.separatedBlochSpheres()\n returnedDictionary[\"wires\"]=self.num_qubits\n returnedDictionary[\"probabilities\"] = self.separatedProbabilities()\n #returnedDictionary[\"blochSpheres\"] = self.separatedBlochSpheres()\n returnedDictionary[\"diracNotation\"] = self.diracNotation()\n returnedDictionary['chart'] = self.graph()\n returnedDictionary[\"link\"] = \"\"\n #returnedDictionary[\"qasmRows\"] = np.transpose(cols).tolist()\n \n if self.API_TOKEN != \"\":\n returnedDictionary[\"link\"] = self.runOnIBMQ()\n \n return returnedDictionary",
"def create_book_result_dicts(\n book_entries: List[dict],\n data_interval_start: pendulum.DateTime,\n data_interval_end: pendulum.DateTime,\n organisation_name: str,\n) -> Dict[dict]:\n book_results = {}\n for entry in book_entries:\n pagepath = entry[\"dimensions\"][0]\n pagetitle = entry[\"dimensions\"][1]\n average_time = float(entry[\"metrics\"][0][\"values\"][-1])\n book_result = {\n \"url\": pagepath,\n \"title\": pagetitle,\n \"start_date\": data_interval_start.strftime(\"%Y-%m-%d\"),\n \"end_date\": data_interval_end.strftime(\"%Y-%m-%d\"),\n \"average_time\": average_time,\n \"unique_views\": {\"country\": {}, \"referrer\": {}, \"social_network\": {}},\n \"page_views\": {\"country\": {}, \"referrer\": {}, \"social_network\": {}},\n \"sessions\": {\"country\": {}, \"source\": {}},\n }\n # add custom dimension data for ANU Press\n if organisation_name == GoogleAnalyticsTelescope.ANU_ORG_NAME:\n # matches dimension order in 'list_all_books'\n custom_dimensions = {\n \"publication_id\": entry[\"dimensions\"][2],\n \"publication_type\": entry[\"dimensions\"][3],\n \"publication_imprint\": entry[\"dimensions\"][4],\n \"publication_group\": entry[\"dimensions\"][5],\n \"publication_whole_or_part\": entry[\"dimensions\"][6],\n \"publication_format\": entry[\"dimensions\"][7],\n }\n book_result = dict(book_result, **custom_dimensions)\n book_results[pagepath] = book_result\n\n return book_results"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for add_or_update_case | def test_add_or_update_case(self):
pass | [
"def test_update_case(self):\n pass",
"def test_update_record(self):\n pass",
"def test_update(self):\n pass",
"def test_update_entry(self):\n pass",
"def test_update_scenario(self):\n pass",
"def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_update_lookup(self):\n pass",
"def test_update_housekeeping(self):\n pass",
"def test_update_collection(self):\n pass",
"def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_cases_partial_update(self):\n pass",
"def test_missing_update_fields(self):\n manager = self.manager_class()\n m = self.model.create(value='blah')\n resp = manager.update(dict(id=m.id), dict(value='duh', id='blah'))\n self.assertDictEqual(dict(value='duh', id=m.id), resp)\n m = self.model.filter(id=m.id).get()\n self.assertDictEqual(dict(value=m.value, id=m.id), resp)\n self.assertRaises(DoesNotExist, self.model.filter(id='blah').get)",
"def test_unit_update(self):\n pass",
"def test_update(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Candy\")\n assert n_updated == 1\n items = list(test_store.get_by())\n\n candy.age = 15\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_cases_update(self):\n pass",
"def test_update_entry_correspondence(self):\n pass",
"def test_partial_update_order(self):\n pass",
"def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for delete_case | def test_delete_case(self):
pass | [
"def test_cases_delete(self):\n pass",
"def test_delete_run(self):\n pass",
"def test_unit_delete(self):\n pass",
"def test_delete(self):\n pass",
"def test_delete1(self):\n pass",
"def test_delete_record(self):\n pass",
"def test_delete7(self):\n pass",
"def test_delete_records(self):\n pass",
"def test_delete_goal(self):\n pass",
"def test_delete_item_using_delete(self):\n pass",
"def test_delete_occurrence(self):\n pass",
"def test_delete_stage_using_delete(self):\n pass",
"def test_delete_transaction_pattern(self):\n pass",
"def test_delete_instance(self):\n pass",
"def test_delete_alert(self):\n pass",
"def test_delete_rule(self):\n pass",
"def test_delete_transaction_code_using_delete(self):\n pass",
"def test_delete_policy(self):\n pass",
"def test_delete_activity(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for get_case_by_id | def test_get_case_by_id(self):
pass | [
"def test_filter_by_case_id(self):\r\n one = self.factory.create(name=\"Foo 1\")\r\n rs = self.F.RunSuiteFactory.create(run=one)\r\n sc = self.F.SuiteCaseFactory.create(suite=rs.suite)\r\n self.factory.create(name=\"Foo 2\")\r\n\r\n res = self.get(params={\"filter-case\": str(sc.case.id)})\r\n\r\n self.assertInList(res, \"Foo 1\")\r\n self.assertNotInList(res, \"Foo 2\")",
"def test_filter_by_case_id(self):\r\n one = self.factory.create(name=\"Foo 1\")\r\n sc = self.F.SuiteCaseFactory.create(suite=one)\r\n self.factory.create(name=\"Foo 2\")\r\n\r\n res = self.get(params={\"filter-case\": str(sc.case.id)})\r\n\r\n self.assertInList(res, \"Foo 1\")\r\n self.assertNotInList(res, \"Foo 2\")",
"def get_case(\n case_id: str,\n db: Session = Depends(get_db),\n) -> Any:\n case_and_site = crud.case.get_case_with_site(db, id=case_id)\n if not case_and_site:\n return None\n (case, site) = case_and_site\n return schemas.CaseWithTaskInfo.get_case_with_task_info(case, site)",
"def get_case(self, key: str):\n case = self.cases.get(key)\n if not hasattr(case, 'case_id'):\n message = \"get_case(): Case key {} does not have a case_id\"\n logmessage(message.format(key))\n else:\n logmessage(\"get_case(): \" + \"Retrieved case {}\".format(str(case)))\n return case",
"def view_cases(context,case_id):\n\n adapter = context.obj['adapter']\n\n if case_id is not None:\n results = adapter.find_case({'case_id': case_id})\n\n else:\n results = adapter.find_cases({})\n\n click.echo(pprint(results))",
"def case(self, case_id=None):\n cases = self.cases()\n if case_id:\n for case in cases:\n if case.case_id == case_id:\n return case\n else:\n if cases:\n return cases[0]\n\n return None",
"def test_filter_by_id(self):\n rcv1 = self.F.RunCaseVersionFactory.create(caseversion__name=\"Case 1\")\n self.F.RunCaseVersionFactory.create(caseversion__name=\"Case 2\")\n\n res = self.get(params={\"filter-id\": rcv1.caseversion.case.id})\n\n self.assertInList(res, \"Case 1\")\n self.assertNotInList(res, \"Case 2\")",
"def test_filter_by_id(self):\r\n rcv1 = self.F.RunCaseVersionFactory.create(caseversion__name=\"Case 1\")\r\n self.F.RunCaseVersionFactory.create(caseversion__name=\"Case 2\")\r\n\r\n res = self.get(params={\"filter-id\": rcv1.caseversion.case.id})\r\n\r\n self.assertInList(res, \"Case 1\")\r\n self.assertNotInList(res, \"Case 2\")",
"def test_filter_by_id(self):\r\n cv1 = self.F.CaseVersionFactory.create(name=u\"Case 1 ùê\")\r\n self.F.CaseVersionFactory.create(name=u\"Case 2 ùê\")\r\n\r\n res = self.get(params={\"filter-id\": cv1.case.id})\r\n\r\n self.assertInList(res, u\"Case 1 ùê\")\r\n self.assertNotInList(res, u\"Case 2 ùê\")",
"def test_get_by_id(self):\n\n result = Contact.get_by_id(3)\n self.assertEqual(result, Contact.objects.get(id=3))",
"def get_case(self, case_id: str) -> Union[int, None]:\n for index, case in enumerate(self.cases):\n if case.id == case_id:\n return index\n return None",
"def test_volleyballcoachs_id_get(self):\n pass",
"def test_get_story_by_id(self):\n pass",
"def case(self, case):\n LOG.debug(\"Getting case {0} from database\".format(case.get(\"case_id\")))\n case_id = case[\"case_id\"]\n return self.db.case.find_one({\"case_id\": case_id})",
"def case(self, case):\n logger.debug(\"Getting case {0} from database\".format(\n case.get('case_id')))\n case_id = case['case_id']\n return self.db.case.find_one({'case_id': case_id})",
"def run_by_case_id(case_log_id, test_case_id, user_id):\n case_log = TestCaseLog.query.filter_by(\n test_case_log_id=case_log_id).first()\n test_case = TestCase.query.filter_by(test_case_id=test_case_id).first()\n test_suite_id = test_case.test_suite_id\n res = run_test(case_log, test_case)\n return {\"status\": True, \"result\": res}",
"def test_supremecourtoftheunitedstatescases_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/supremecourtoftheunitedstatescases/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_get_recipe_by_id(self):\n recipe = self.request_mgr.get_recipe_by_id(35354)\n self.assertIn(\"Guinness\", recipe.get('title'))",
"def get_case(self, case_id, to=None, endpoint=None, codes_allowed=[200, ],\n ret_obj=False, **kw):\n assert all([to, endpoint])\n url = '{}case/{}'.format(endpoint, case_id)\n headers = {'Content-type': 'application/json'}\n req = Request('GET', url, headers=headers, auth=self.auth).prepare()\n\n rv = self._http_call(req, to=to, codes_allowed=codes_allowed,\n ret_obj=ret_obj)\n if ret_obj:\n return rv\n\n return rv.headers.get('location'), rv.json()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for get_cases_for_dict | def test_get_cases_for_dict(self):
pass | [
"def test_map(self):\n\n test_cases = [\n Case(\n description=\"lists of objects\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[\"foo\", \"bar\", \"baz\"],\n ),\n Case(\n description=\"missing argument\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"too many arguments\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[\"title\", \"\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"missing property\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"heading\": \"baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[\"foo\", \"bar\", None],\n ),\n Case(\n description=\"value not an array\",\n val=123,\n args=[\"title\"],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"array contains non object\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, 5, []],\n args=[\"title\"],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[\"title\"],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"undefined argument\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=[None, None, None],\n ),\n ]\n\n self._test(Map, test_cases)",
"def test_cases_list(self):\n pass",
"def test_dict(self, obj: dict) -> None:\r\n properties = read_properties(obj)\r\n for key, value in properties.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if read_type(value) == 'object':\r\n logger.debug('dict -> dict')\r\n self.test_dict(obj=value)\r\n elif read_type(value) == 'array':\r\n logger.debug('dict -> list')\r\n self.test_list(array=value)",
"def check_for_dict(check):",
"def test_jobs_dict(get_data): # Provided by professor\n assert len(get_data) >= 100\n assert type(get_data[1]) is dict",
"def test_dict(self, dictionary: dict) -> None:\r\n if not isinstance(dictionary, dict):\r\n raise ValueError(f'Expected dictionary, but received {type(dictionary)}')\r\n for key, value in dictionary.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if isinstance(value, dict):\r\n self.test_dict(dictionary=value)\r\n elif isinstance(value, list):\r\n self.test_list(items=value)",
"def test_createDictionary(testData):\n from outputter import createDictionary\n metrics = createDictionary(testData)\n check = True\n if metrics.get(\"mean_hr_bpm\") != 12:\n check = False\n if metrics.get(\"voltage_extremes\") != (0.0, 100.0):\n check = False\n if metrics.get(\"duration\") != 10:\n check = False\n if metrics.get(\"num_beats\") != 2:\n check = False\n if np.array_equal(metrics.get(\"beats\"), np.array([3.5, 7.5])) is not True:\n check = False\n assert check is True",
"def process_cases(cases):\n logger.info(\"Processing cases:\")\n\n case_dict = {}\n if not cases:\n logger.debug(\"PlotConfig: ... none specified\")\n return case_dict\n\n # cases1 = [\"act=arg_comp\", ]\n # result1 = {'act': ['arg_comp', ]}\n # cases2 = [\"act=arg_comp\", \"lev=30\"]\n # result2 = {'act':['arg_comp', ], 'lev': ['30', ]}\n # cases3 = [\"act=arg_comp,nenes_comp\", ]\n # result1 = {'act': ['arg_comp', 'nenes_comp']}\n # cases4 = [\"act=arg_comp,nenes_comp\", \"lev=30,28\"]\n # result2 = {'act':['arg_comp', 'nenes_comp',], 'lev': ['30', '28',]}\n\n for i, case in enumerate(cases, 1):\n dim, vals = case.split(\"=\")\n vals = vals.split(\",\")\n logger.info(\" {}) {} = {}\".format(i, dim, vals))\n case_dict[dim] = map(str, vals)\n\n return case_dict",
"def test_fn_call_with_dict():\n l = [1, 2, 3, 4, 5]\n ds = [defaultdict(int), defaultdict(int), defaultdict(int)]\n for d in ds:\n for fn in [s7.div, s7.mul, s7.add, \"abcd\", 1234]:\n try:\n f = s7.count_fn_called_with_dict(dict_=d, fn=fn)\n for i in range(0, random.randint(2, 10)):\n f(*l)\n assert fn in d.keys() and d[fn] == (i + 1)\n except Exception as e:\n assert e.__class__.__name__ == TypeError.__name__",
"def test_sample_mapped_keys(self):\r\n\r\n # With num_coverage=1 only the keys will be sampled\r\n actual = sample_mapped_keys(self.test_map, 1)\r\n self.assertEqual(actual, {'1': ['1'], '2': ['2']})\r\n\r\n actual = sample_mapped_keys(self.test_map, 3)\r\n for key in actual.keys():\r\n # check number of sampled keys\r\n self.assertEqual(3, len(actual[key]))\r\n for x in actual[key]:\r\n # check that sampled key is in the full list\r\n correct = list(self.test_map[key])\r\n correct.append(key)\r\n self.assertTrue(x in correct)",
"def test_fairness_returns(self):\n expected_dict = \\\n {\n 0 : 0.0,\n 25 : 0.5,\n 50 : 1.0,\n 75 : 0.5,\n 100 : 0.0,\n }\n for value, expected in expected_dict.iteritems():\n self.assertEqual(processFairness(value), expected)",
"def tests(test_cases, function):\n\tfor key in test_cases.keys():\n\t\tif function(key) == test_cases[key]:\n\t\t\tprint ('Tested for {} and got {}, which is correct!'.format(key[0][0], test_cases[key]))\n\t\telse:\n\t\t\tprint ('-------------------------------------------------------')\n\t\t\tprint ('ISSUE with {}'.format(key[0][0]))\n\t\t\tprint ('Expected {} and instead got {}!'.format(test_cases[key], function(key)))\n\t\t\tprint ('-------------------------------------------------------')",
"def test_create_dicts():\n\n expected = {\n \"First Name\": \"John\",\n \"Last Name\": \"Smith\",\n \"Age\": 12,\n \"Gender\": \"Male\",\n \"Diagnosis\": \"Something wrong\",\n \"TSH results\": [2, 2, 2, 2, 2, 2],\n }\n\n result = create_dicts([\"John Smith\"], [12], [\"Male\"], [\"Something wrong\"],\n [[2, 2, 2, 2, 2, 2]])\n\n assert expected == result[0]",
"def test_test_values():\n run_test(\"test_cases\")",
"def test_get_dict_value(basic_dict_value, basic_dict):\n acc = Accessor(getter=basic_dict_get)\n assert acc.get(basic_dict) == basic_dict_value",
"def test_mimic_dict():\n assert type(mimic_dict(PATH)) == dict\n print('testing function mimic_dict() passed!')",
"def test_cases_read(self):\n pass",
"def test_dict_validator_has_instructions():\n assert DictValidator._instructions",
"def test_comparing(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]), test[1])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for get_sync_history | def test_get_sync_history(self):
pass | [
"def test_tracker_getHistory():\n\n trackers, cap = init_tracker()\n tr = trackers[0]\n tr.addHistory([1, 1, 1, 1])\n\n assert tr.getHistory()[1] == [1, 1, 1, 1]",
"def test_add_history(self):\n pass",
"def test_update_task_runner_history(self):\n pass",
"def test_get_team_history(self):\n pass",
"def test_get_registration_launch_history(self):\n pass",
"def test_get_ticket_history(self):\n pass",
"def test_get_ticket_history_0(self):\n pass",
"def test_update_workflow_history(self):\n pass",
"def test_search_task_runner_history(self):\n pass",
"def test_search_workflow_history(self):\n pass",
"def test_get_alert_history(self):\n pass",
"def test_history(self):\n km = self.km\n km.start_kernel()\n kc = km.client()\n kc.start_channels()\n kc.wait_for_ready()\n kc.execute(\"1\")\n kc.history(hist_access_type=\"tail\", n=1)\n msg = kc.shell_channel.get_msgs()[-1]\n assert msg[\"header\"][\"msg_type\"] == \"history_reply\"\n history = msg[\"content\"][\"history\"]\n assert len(history) == 1\n assert history[0][2] == \"1\"",
"def testGetHistory(self):\n self.maxDiff = None\n container_obj = self.explorer_object.GetContainer(\n 'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c')\n expected = {\n '1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125':\n collections.OrderedDict({\n 'size': 0\n }),\n 'df557f39d413a1408f5c28d8aab2892f927237ec22e903ef04b331305130ab38':\n collections.OrderedDict({\n 'created_at':\n '2018-12-26T08:20:42.687925+00:00',\n 'container_cmd': '/bin/sh -c #(nop) ADD file:ce026b62356eec3ad1214f92be2c9dc063fe205bd5e600be3492c4dfb17148bd in / ',\n 'size': 1154361\n })\n }\n\n self.assertEqual(expected, container_obj.GetHistory())",
"def partialRefreshHistoryList(self):\n RCI.instance().historyTasks()",
"def test_get_registration_instance_launch_history(self):\n pass",
"def history(self):\n return _ncofdm_swig.add_cp_sync_sptr_history(self)",
"async def test_retrieve_history_orders_by_ticket(self):\n history_orders = {\n 'historyOrders': [{\n 'clientId': 'TE_GBPUSD_7hyINWqAlE',\n 'currentPrice': 1.261,\n 'currentVolume': 0,\n 'doneTime': '2020-04-15T02:45:06.521Z',\n 'id': '46214692',\n 'magic': 1000,\n 'platform': 'mt5',\n 'positionId': '46214692',\n 'state': 'ORDER_STATE_FILLED',\n 'symbol': 'GBPUSD',\n 'time': '2020-04-15T02:45:06.260Z',\n 'type': 'ORDER_TYPE_BUY',\n 'volume': 0.07\n }],\n 'synchronizing': False\n }\n client.get_history_orders_by_ticket = AsyncMock(return_value=history_orders)\n actual = await api.get_history_orders_by_ticket('46214692')\n assert actual == history_orders\n client.get_history_orders_by_ticket.assert_called_with('accountId', '46214692')",
"def history():",
"def test_projects_history(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for update_case | def test_update_case(self):
pass | [
"def test_cases_update(self):\n pass",
"def test_update_scenario(self):\n pass",
"def test_update(self):\n pass",
"def test_add_or_update_case(self):\n pass",
"def test_update_record(self):\n pass",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_unit_update(self):\n pass",
"def test_update_catering(self):\n pass",
"def test_update_entry(self):\n pass",
"def test_update_process(self):\n pass",
"def test_update9(self):\n pass",
"def test_cases_partial_update(self):\n pass",
"def test_update_entry_result(self):\n pass",
"def test_update_entry_detail(self):\n pass",
"def test_update_base_test(self):\n s = yield self.getReadyDB()\n a = yield s.dInsert('foobar', value='foo')\n b = yield s.dInsert('foobar', value='bar')\n c = yield s.dUpdate('foobar', where='value=$value', vars={'value':'foo'}, value='bar')\n self.assertEqual(c, 1)\n c = yield s.dUpdate('foobar', where='value=$value', vars={'value':'bar'}, value='sam')\n self.assertEqual(c, 2)",
"def test_update_state1(self):\n pass",
"def test_update_state4(self):\n pass",
"def test_update_verification_flow(self):\n pass",
"def test_update_function_booking(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unset key from the encryptor and decryptor | def unset_cipher(self, key_name=None):
if key_name is None:
if self.key_name is not None:
message_key_types.unset_cipher(self.key_name)
if self.pending_key_name is not None:
message_key_types.unset_cipher(self.pending_key_name)
else:
message_key_types.unset_cipher(key_name) | [
"def CryptDestroyKey(self):\n\n return self.__del__()",
"def clear_key(self, key):\r\n return self.handler.clear_key(key_to_code(key))",
"def stop_crypto(self):\n self.clear_bitmask(0x08, 0x08)\n self.authed = False",
"def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'",
"def delkey(confirm, pub):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n mph.wallet.removePrivateKeyFromPublicKey(pub)\n set_shared_morphene_instance(stm)",
"def test_revoke_key(self):\n self.fail(\"test not implemented\")",
"def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)",
"def removeAllKeys(self) -> None:\n ...",
"def test_remove_pass(self):\n TEST_PASS = \"weakpass\"\n # Generate a test key with a password\n key = RSA.gen_key(2048, 5, callback=lambda: None)\n key_pem = key.as_pem(cipher='aes_256_cbc',\n callback=lambda x: TEST_PASS)\n # Now try to decrypt the key with the helper function\n key_out = SSHKeyUtils.remove_pass(key_pem, TEST_PASS)\n # Check returned key looks OK and is password-less\n self.assertIn('BEGIN RSA PRIVATE KEY', key_out)\n key_back_in = RSA.load_key_string(key_out,\n callback=lambda x: None)\n # Finally, test with wrong password\n self.assertRaises(RSA.RSAError, SSHKeyUtils.remove_pass,\n key_pem, \"wrong\")",
"def tearDown(self):\n if self.keypair_creator:\n self.keypair_creator.clean()\n\n try:\n os.remove(pub_file_path)\n except:\n pass\n\n try:\n os.remove(priv_file_path)\n except:\n pass",
"def main(self):\n self.key = self.read_key()\n cipher_bin = self.to_bin(False)\n self.decrypt(cipher_bin)",
"def request_clear_key_events(self) -> None:\n self.clear_keys = True",
"def test_delete_secret_key(self):\n pass",
"def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'",
"def deregister(self, key):\n del self.keyreg[key]",
"def key_decrypted(self):\n if not self.key:\n return \"\"\n return crypto.dump_privatekey(crypto.FILETYPE_PEM, self._pkey()).decode('utf-8')",
"def deinit_keyring():\n global updater\n if updater is not None:\n updater.deinit()\n updater = None",
"def remove(key):",
"def remove_key(self,key):\n public_key = key\n try: public_key = key.public_key()\n except: pass\n\n serialized = public_key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = serialized.split(None,2)[1]\n data = b64decode(blob)\n\n message = WriteMessage()\n message.write_uint8(constants.request.SSH_AGENTC_REMOVE_IDENTITY)\n message.write_binary(data)\n self.connection.send_message(message.data)\n self._await_operation_result()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set timer for key revocation | def _set_delete_timer(self, key_name, timeout):
if key_name is not None:
#print("(%d) _set_delete_timer:" % int(time.time()), key_name.hex()[:10], timeout)
query_management.QueryEntry(expire_after=timeout, callback_expire=remove_old_key,
data={KeyType.hint: key_name}, retry_count=0) | [
"def __updateElapsedTime(self):\n if self._keyCodeTime != 0.0 and \\\n (globalClock.getFrameTime() - self._keyCodeTime) >= self._timeout:\n self.notify.debug(\"Key code timed out. Resetting...\")\n self.reset()\n messenger.send(KeyCodes.CLEAR_CODE_EVENT)\n self._keyCodeTime = globalClock.getFrameTime()",
"def invalidate(key=None):",
"def revoke(self):\n self.revoked = True\n self.revocation_date = datetime.datetime.utcnow()",
"def _StopRenewal(self):\r\n if self._timeout is not None:\r\n IOLoop.current().remove_timeout(self._timeout)\r\n self._timeout = None\r\n self._renewing = False",
"def revoke_refresh_token(cls, jti: str) -> None:\n redis = cls._conn_redis(cls)\n expired_time = int(timedelta(days=cls._REFRESH_TOKEN_EXPIRES).total_seconds())\n redis.setex(jti,expired_time,'true')",
"def _expire_item(self, key):\r\n (timeout, callback) = self._timeouts[key]\r\n now = time.time()\r\n if timeout <= now:\r\n item = dict.pop(self, key)\r\n del self._timeouts[key]\r\n if callback:\r\n try:\r\n callback(key, item)\r\n except TypeError:\r\n try:\r\n callback(key)\r\n except TypeError:\r\n callback()\r\n return None\r\n else:\r\n return timeout - now",
"def resetar_timer(self, event=None):\n global timeout_id\n if timeout_id is not None:\n self.janela.after_cancel(timeout_id)\n timeout_id = self.janela.after(86400000, self.encerrar_sessao) # Faz o timeout depois de 10 minutos",
"def _expire_item(self, key):\n (timeout, callback) = self._timeouts[key]\n now = time.time()\n if timeout <= now:\n item = dict.pop(self, key)\n del self._timeouts[key]\n if callback:\n try:\n callback(key, item)\n except TypeError:\n try:\n callback(key)\n except TypeError:\n callback()\n return None\n else:\n return timeout - now",
"def stop(self,key):\n data = self._watch[key]\n if data[0]<0 : data[0] = time.time() - data[1]",
"def clean_timer(sc):\n global prev_dict_\n # Cleaning the previous dictionary after 5 hours\n prev_dict_ = {}\n z.enter(18000, 1, clean_timer, (sc,))",
"def _cancel_timeout(view):\n if hasattr(_cancel_timeout, \"previous_callback\"):\n _cancel_timeout.previous_callback.enabled = False\n\n def do_remove_status():\n if do_remove_status.enabled:\n _clear_prefix_key_mode(view)\n do_remove_status.enabled = True\n _cancel_timeout.previous_callback = do_remove_status\n\n press_time = int(settings().get(\"key_combination_time\") * 1000)\n sublime.set_timeout(do_remove_status, press_time)",
"def reset_timer(self):\r\n \r\n self.time = 0 \r\n self.clock = pg.time.Clock()",
"def test_rotate_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 80, 20, 19)\n assert key.audit_state == 'old'",
"def setDeactivationTime(*argv):",
"def sync_timeout(self):\n self.neighbor_state.sync_timer_expires(self)",
"def synictimer(self, synictimer):\n\n self._synictimer = synictimer",
"def let_timer_expire(context):\n _cancel_all_timers(context)\n emit_utterance(context.bus, \"set a 3 second timer\")\n expected_response = [\"started-timer\"]\n match_found, speak_messages = wait_for_dialog_match(context.bus, expected_response)\n assert match_found, format_dialog_match_error(expected_response, speak_messages)\n expected_response = [\"timer-expired\"]\n match_found, speak_messages = wait_for_dialog_match(context.bus, expected_response)\n assert match_found, format_dialog_match_error(expected_response, speak_messages)",
"async def _expire(self, key, ttl):\n return await self.client.touch(key, ttl)",
"def reset_timer():\n resetTimer = time.time()\n target_time.clear()\n target_time.append(resetTimer)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the presence for this channel | def presence(self, params=None, timeout=None):
params = params or {}
path = '/channels/%s/presence' % self.__name
return self.__ably._get(path, params=params, timeout=timeout).json() | [
"def presence(self):\n return self.slack_client.api_call(\"users.getPresence?user=\"+self.user_id)",
"def presence(self, *args, **kwargs):\n return self.send(xmpp.Presence(*args, **kwargs))",
"def get_presence(self):\n present_path = \"{}{}{}\".format(CPLD_I2C_PATH, 'present_', self.fan_index+1)\n val=self.__read_txt_file(present_path)\n if not self.is_psu_fan:\n return int(val, 10)\n else:\n return True",
"def getPlayerPresence(self, name):\n return self.thePresenceManager.getPresence(name)",
"def get_presence(self) -> typing.Optional[presences_.MemberPresence]:\n if not isinstance(self.user.app, traits.CacheAware):\n return None\n\n return self.user.app.cache.get_presence(self.guild_id, self.user.id)",
"def get_presence(self) -> dict[str, Any]:\r\n try:\r\n params = {\"type\": \"primary\"}\r\n response: dict[str, Any] = self._request_builder.get(\r\n url=f\"{BASE_PATH['profile_uri']}/{self.account_id}{API_PATH['basic_presences']}\",\r\n params=params,\r\n ).json()\r\n return response\r\n except PSNAWPForbidden as forbidden:\r\n raise PSNAWPForbidden(f\"You are not allowed to check the presence of user {self.online_id}\") from forbidden",
"def Presence(self, *args, **kwargs):\n return Presence(self, *args, **kwargs)",
"def channel_status(self, name):\n result = True\n\n cmd = 'CHANNEL STATUS %s' % (name)\n res = utils.agi_send(cmd)[1]\n\n if res == '-1':\n result = False\n\n return result, res",
"def presence(self):\n self.sparkle.send({\n 'event': 'luna-presence',\n 'incarnation': self.incarnation,\n })",
"def active_channel(self):\n return self._active_channel",
"def channel_ready(self):\n channel_ready_future = grpc.channel_ready_future(self._channel)\n return channel_ready_future",
"def get_resource_present(self):\n return self.__resource_present",
"def getChannelResponse(self):\n \n \n return self.channel_response",
"def customers_presence(self):\n return self._customers_presence",
"def connected_channel(self):\n if not self.channel_id:\n return None\n\n return self._bot.get_channel(int(self.channel_id))",
"async def check_na_channel(self, guild: discord.Guild):\n\n ch_id = await self.config.guild(guild).na_channel_id()\n\n if ch_id:\n return discord.utils.get(guild.text_channels, id=ch_id)\n return False",
"def presence_in_ladn(self):\n return self._presence_in_ladn",
"def channel_details(self):\n return self._channel_details",
"def get_presence(\n self, user: snowflakes.SnowflakeishOr[users.PartialUser]\n ) -> typing.Optional[presences_.MemberPresence]:\n if not isinstance(self.app, traits.CacheAware):\n return None\n\n return self.app.cache.get_presence(self.id, user)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an existing Assessment resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AssessmentArgs.__new__(AssessmentArgs)
__props__.__dict__["additional_data"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["links"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["name"] = None
__props__.__dict__["partners_data"] = None
__props__.__dict__["resource_details"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
return Assessment(resource_name, opts=opts, __props__=__props__) | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"additional_data\"] = None\n __props__[\"display_name\"] = None\n __props__[\"links\"] = None\n __props__[\"metadata\"] = None\n __props__[\"name\"] = None\n __props__[\"partners_data\"] = None\n __props__[\"resource_details\"] = None\n __props__[\"status\"] = None\n __props__[\"type\"] = None\n return Assessment(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = AssessmentArgs.__new__(AssessmentArgs)\n\n __props__.__dict__[\"azure_hybrid_use_benefit\"] = None\n __props__.__dict__[\"azure_location\"] = None\n __props__.__dict__[\"azure_offer_code\"] = None\n __props__.__dict__[\"azure_pricing_tier\"] = None\n __props__.__dict__[\"azure_storage_redundancy\"] = None\n __props__.__dict__[\"confidence_rating_in_percentage\"] = None\n __props__.__dict__[\"created_timestamp\"] = None\n __props__.__dict__[\"currency\"] = None\n __props__.__dict__[\"discount_percentage\"] = None\n __props__.__dict__[\"e_tag\"] = None\n __props__.__dict__[\"monthly_bandwidth_cost\"] = None\n __props__.__dict__[\"monthly_compute_cost\"] = None\n __props__.__dict__[\"monthly_storage_cost\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"number_of_machines\"] = None\n __props__.__dict__[\"percentile\"] = None\n __props__.__dict__[\"prices_timestamp\"] = None\n __props__.__dict__[\"scaling_factor\"] = None\n __props__.__dict__[\"sizing_criterion\"] = None\n __props__.__dict__[\"stage\"] = None\n __props__.__dict__[\"status\"] = None\n __props__.__dict__[\"time_range\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"updated_timestamp\"] = None\n return Assessment(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n categories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n implementation_effort: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n remediation_description: Optional[pulumi.Input[str]] = None,\n severity: Optional[pulumi.Input[str]] = None,\n threats: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n user_impact: Optional[pulumi.Input[str]] = None) -> 'AssessmentPolicy':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AssessmentPolicyState.__new__(_AssessmentPolicyState)\n\n __props__.__dict__[\"categories\"] = categories\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"implementation_effort\"] = implementation_effort\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"remediation_description\"] = remediation_description\n __props__.__dict__[\"severity\"] = severity\n __props__.__dict__[\"threats\"] = threats\n __props__.__dict__[\"user_impact\"] = user_impact\n return AssessmentPolicy(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n exclude_app_packages_from_cleanups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n package_cleanup: Optional[pulumi.Input[bool]] = None,\n reboot_after_use: Optional[pulumi.Input[bool]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'InstanceProfile':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceProfileState.__new__(_InstanceProfileState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"exclude_app_packages_from_cleanups\"] = exclude_app_packages_from_cleanups\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"package_cleanup\"] = package_cleanup\n __props__.__dict__[\"reboot_after_use\"] = reboot_after_use\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return InstanceProfile(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n apply_only_at_cron_interval: Optional[pulumi.Input[bool]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n association_id: Optional[pulumi.Input[str]] = None,\n association_name: Optional[pulumi.Input[str]] = None,\n automation_target_parameter_name: Optional[pulumi.Input[str]] = None,\n compliance_severity: Optional[pulumi.Input[str]] = None,\n document_version: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n max_concurrency: Optional[pulumi.Input[str]] = None,\n max_errors: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n output_location: Optional[pulumi.Input[pulumi.InputType['AssociationOutputLocationArgs']]] = None,\n parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n schedule_expression: Optional[pulumi.Input[str]] = None,\n targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AssociationTargetArgs']]]]] = None,\n wait_for_success_timeout_seconds: Optional[pulumi.Input[int]] = None) -> 'Association':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AssociationState.__new__(_AssociationState)\n\n __props__.__dict__[\"apply_only_at_cron_interval\"] = apply_only_at_cron_interval\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"association_id\"] = association_id\n __props__.__dict__[\"association_name\"] = association_name\n __props__.__dict__[\"automation_target_parameter_name\"] = automation_target_parameter_name\n __props__.__dict__[\"compliance_severity\"] = compliance_severity\n __props__.__dict__[\"document_version\"] = document_version\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"max_concurrency\"] = max_concurrency\n __props__.__dict__[\"max_errors\"] = max_errors\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"output_location\"] = output_location\n __props__.__dict__[\"parameters\"] = parameters\n __props__.__dict__[\"schedule_expression\"] = schedule_expression\n __props__.__dict__[\"targets\"] = targets\n __props__.__dict__[\"wait_for_success_timeout_seconds\"] = wait_for_success_timeout_seconds\n return Association(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name, id, opts=None, arn=None, metric_name=None, name=None, predicates=None, tags=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"arn\"] = arn\n __props__[\"metric_name\"] = metric_name\n __props__[\"name\"] = name\n __props__[\"predicates\"] = predicates\n __props__[\"tags\"] = tags\n return Rule(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n comparison: Optional[pulumi.Input[str]] = None,\n created_at: Optional[pulumi.Input[int]] = None,\n critical: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionCriticalArgs']]] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n entity_guid: Optional[pulumi.Input[str]] = None,\n event: Optional[pulumi.Input[str]] = None,\n integration_provider: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[int]] = None,\n process_where: Optional[pulumi.Input[str]] = None,\n runbook_url: Optional[pulumi.Input[str]] = None,\n select: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n updated_at: Optional[pulumi.Input[int]] = None,\n violation_close_timer: Optional[pulumi.Input[int]] = None,\n warning: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionWarningArgs']]] = None,\n where: Optional[pulumi.Input[str]] = None) -> 'InfraAlertCondition':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InfraAlertConditionState.__new__(_InfraAlertConditionState)\n\n __props__.__dict__[\"comparison\"] = comparison\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"critical\"] = critical\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"entity_guid\"] = entity_guid\n __props__.__dict__[\"event\"] = event\n __props__.__dict__[\"integration_provider\"] = integration_provider\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"policy_id\"] = policy_id\n __props__.__dict__[\"process_where\"] = process_where\n __props__.__dict__[\"runbook_url\"] = runbook_url\n __props__.__dict__[\"select\"] = select\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"updated_at\"] = updated_at\n __props__.__dict__[\"violation_close_timer\"] = violation_close_timer\n __props__.__dict__[\"warning\"] = warning\n __props__.__dict__[\"where\"] = where\n return InfraAlertCondition(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get_state(api, entity_id):\n try:\n req = api(METH_GET, URL_API_STATES_ENTITY.format(entity_id))\n\n # req.status_code == 422 if entity does not exist\n\n return ha.State.from_dict(req.json()) \\\n if req.status_code == 200 else None\n\n except (HomeAssistantError, ValueError):\n # ValueError if req.json() can't parse the json\n _LOGGER.exception(\"Error fetching state\")\n\n return None",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['ConfigurationStoreEncryptionArgs']]] = None,\n endpoint: Optional[pulumi.Input[str]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ConfigurationStoreIdentityArgs']]] = None,\n local_auth_enabled: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_read_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationStorePrimaryReadKeyArgs']]]]] = None,\n primary_write_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationStorePrimaryWriteKeyArgs']]]]] = None,\n public_network_access: Optional[pulumi.Input[str]] = None,\n purge_protection_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n secondary_read_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationStoreSecondaryReadKeyArgs']]]]] = None,\n secondary_write_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationStoreSecondaryWriteKeyArgs']]]]] = None,\n sku: Optional[pulumi.Input[str]] = None,\n soft_delete_retention_days: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'ConfigurationStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ConfigurationStoreState.__new__(_ConfigurationStoreState)\n\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"endpoint\"] = endpoint\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"local_auth_enabled\"] = local_auth_enabled\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_read_keys\"] = primary_read_keys\n __props__.__dict__[\"primary_write_keys\"] = primary_write_keys\n __props__.__dict__[\"public_network_access\"] = public_network_access\n __props__.__dict__[\"purge_protection_enabled\"] = purge_protection_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"secondary_read_keys\"] = secondary_read_keys\n __props__.__dict__[\"secondary_write_keys\"] = secondary_write_keys\n __props__.__dict__[\"sku\"] = sku\n __props__.__dict__[\"soft_delete_retention_days\"] = soft_delete_retention_days\n __props__.__dict__[\"tags\"] = tags\n return ConfigurationStore(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Skill':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = SkillArgs.__new__(SkillArgs)\n\n __props__.__dict__[\"authentication_configuration\"] = None\n __props__.__dict__[\"skill_package\"] = None\n __props__.__dict__[\"vendor_id\"] = None\n return Skill(resource_name, opts=opts, __props__=__props__)",
"def get_assessment(self, assessment_id):\n return # osid.assessment.Assessment",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None) -> 'Agent':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AgentState.__new__(_AgentState)\n\n __props__.__dict__[\"activation_key\"] = activation_key\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_link_endpoint\"] = private_link_endpoint\n __props__.__dict__[\"security_group_arns\"] = security_group_arns\n __props__.__dict__[\"subnet_arns\"] = subnet_arns\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"vpc_endpoint_id\"] = vpc_endpoint_id\n return Agent(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FhirStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = FhirStoreArgs.__new__(FhirStoreArgs)\n\n __props__.__dict__[\"complex_data_type_reference_parsing\"] = None\n __props__.__dict__[\"dataset_id\"] = None\n __props__.__dict__[\"default_search_handling_strict\"] = None\n __props__.__dict__[\"disable_referential_integrity\"] = None\n __props__.__dict__[\"disable_resource_versioning\"] = None\n __props__.__dict__[\"enable_update_create\"] = None\n __props__.__dict__[\"fhir_store_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"notification_config\"] = None\n __props__.__dict__[\"notification_configs\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"stream_configs\"] = None\n __props__.__dict__[\"validation_config\"] = None\n __props__.__dict__[\"version\"] = None\n return FhirStore(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Stack':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = StackArgs.__new__(StackArgs)\n\n __props__.__dict__[\"agent_version\"] = None\n __props__.__dict__[\"attributes\"] = None\n __props__.__dict__[\"chef_configuration\"] = None\n __props__.__dict__[\"clone_app_ids\"] = None\n __props__.__dict__[\"clone_permissions\"] = None\n __props__.__dict__[\"configuration_manager\"] = None\n __props__.__dict__[\"custom_cookbooks_source\"] = None\n __props__.__dict__[\"custom_json\"] = None\n __props__.__dict__[\"default_availability_zone\"] = None\n __props__.__dict__[\"default_instance_profile_arn\"] = None\n __props__.__dict__[\"default_os\"] = None\n __props__.__dict__[\"default_root_device_type\"] = None\n __props__.__dict__[\"default_ssh_key_name\"] = None\n __props__.__dict__[\"default_subnet_id\"] = None\n __props__.__dict__[\"ecs_cluster_arn\"] = None\n __props__.__dict__[\"elastic_ips\"] = None\n __props__.__dict__[\"hostname_theme\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"rds_db_instances\"] = None\n __props__.__dict__[\"service_role_arn\"] = None\n __props__.__dict__[\"source_stack_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"use_custom_cookbooks\"] = None\n __props__.__dict__[\"use_opsworks_security_groups\"] = None\n __props__.__dict__[\"vpc_id\"] = None\n return Stack(resource_name, opts=opts, __props__=__props__)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Links relevant to the assessment | def links(self) -> pulumi.Output['outputs.AssessmentLinksResponse']:
return pulumi.get(self, "links") | [
"def links(self) -> 'outputs.AssessmentLinksResponse':\n return pulumi.get(self, \"links\")",
"def review_links():",
"def test_view(self):\n response = self.client.get(reverse('makeReports:assessment-summary',kwargs={'report':self.rpt.pk}))\n self.assertEquals(response.status_code,200)\n self.assertContains(response,self.assess.assessment.title)\n self.assertContains(response,self.assess2.assessment.title)\n self.assertNotContains(response, self.assessNotInRpt.assessment.title)",
"def getLink(self):",
"def test_link_to_manage_cases(self):\r\n s = self.factory.create(name=\"Foo\")\r\n\r\n res = self.get()\r\n\r\n self.assertElement(\r\n res.html,\r\n \"a\",\r\n href=\"{0}?filter-suite={1}\".format(\r\n reverse(\"manage_cases\"), str(s.id)\r\n )\r\n )",
"def link(self):\r\n\r\n # Take the link entires from TOML file\r\n schedules = cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if schedules:\r\n for entries in schedules:\r\n # Construct payload \r\n for payload in entries.get('link'):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=payload)\r\n # Post request\r\n self.add_post(payload, API.url_link, self.links)",
"def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})",
"def insertLinks(self):\n isis = QtGui.QAction(self.menuInfo)\n isis.setText('ISIS')\n isis.triggered.connect(\n functools.partial(self.openLink,\n 'https://www.aug.ipp.mpg.de/cgibin/sfread_only/isis'))\n self.menuInfo.insertAction(self.menuAbout,isis)\n\n shotf = QtGui.QAction(self.menuInfo)\n shotf.setText( 'Shotfile system documentation')\n shotf.triggered.connect(\n functools.partial(self.openLink,\n 'https://www.aug.ipp.mpg.de/wwwaug/guidelines/shotfiles.shtml'))\n self.menuInfo.insertAction(self.menuAbout,shotf)\n\n libddww = QtGui.QAction(self.menuInfo)\n libddww.setText( 'libddww documentation')\n libddww.triggered.connect(\n functools.partial(self.openLink,\n 'https://www.aug.ipp.mpg.de/aug/manuals/pylibs/'))\n self.menuInfo.insertAction(self.menuAbout,libddww)",
"def instrument_links(instrument_id):\n db_links = db.session.query(InstrumentLink).filter(InstrumentLink.instrument_id == instrument_id).all()\n links = [dict(text=db_link.text, link=db_link.link) for db_link in db_links]\n db_instrument = db_select_instrument(instrument_id)\n return render_template(\"edit_links.html\", links=links, instrument=db_instrument, instrument_id=instrument_id)",
"def get_absolute_url(self):\n return reverse('clinicalTrial-detail', args=[str(self.trialId)])",
"def issueListing(self, v, i):\n #list of URLS within the issue\n# links = []\n issURL = self.link(vol = v, iss = i )\n html=urlopen(issURL)\n soup=BeautifulSoup(html,'html.parser')\n URLs = [] #Empty list\n \n# titles = soup.find_all('h5', class_=\"title\")\n# authors = soup.find_all('h6', class_=\"authors\")\n# pubs = soup.find_all('h6', class_=\"pub-info\")\n# for t, a, p in zip(titles, authors, pubs):\n blocks = soup.find_all('div', class_=\"article panel article-result\")\n for b in blocks:\n# print(b)\n titletag = b.find('h5', class_=\"title\")\n title = titletag.get_text()\n #Extract abstract url from title head\n aURL = titletag.find('a', href = True)['href']\n alink = 'https://journals.aps.org' + aURL\n #Print out the scraped information\n print(title)\n print(alink)\n #Extract research area and topic keywords\n kwlist = b.find('ul', class_=\"inline-list subjects\")\n #If the list tag exists\n if kwlist:\n lis = kwlist.find_all('li')\n kws = [li.get_text() for li in lis] \n print(kws)\n #Add utf-8 encode\n# print(kws.encode('utf-8')) \n print('----------------------------------------------------------------') \n #Collect URLs in the issue\n URLs.append('https://journals.aps.org' + aURL)\n return URLs",
"def links(request):\n assert isinstance(request, HttpRequest)\n return render(request,\n 'initial/links.html',\n context_instance = RequestContext(request,\n {'title':'Links',\n 'message':'Some useful links:',\n 'year':datetime.now().year,}))",
"def show_useful_links():\n return render_template('usefullinks.html')",
"def use_comparative_assessment_view(self):\n pass",
"def get_absolute_url(self):\n return reverse('student-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('subject-detail', args=[str(self.id)])",
"def get_list_link(self):",
"def test_demographics_links(self):\n self.selenium.find_element_by_link_text('Demographics').click()\n for group in DEMOGRAPHICS:\n self.selenium.find_element_by_link_text(group).click()\n self.assertEqual(self.selenium.title, \"Education: \"+group)\n self.selenium.execute_script(\"window.history.go(-1)\")\n self.selenium.find_element_by_link_text('Home').click()",
"def link_and_answer(self, url, answer):\n self.__out__(self.__call_core__('g_answer'))\n self.__out__('<i><p>' + answer + '</p></i>')\n link = '<a href=\"' + str(url) + '\" target=\"_blank\">link</a>'\n subject = '<b>'+url.split('/')[-1].split('.')[0].replace(\"-\",\" \")+'</b>'\n self.__out__(self.__call_core__('g_link').format(link=link, subject=subject))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Details of the resource that was assessed | def resource_details(self) -> pulumi.Output[Any]:
return pulumi.get(self, "resource_details") | [
"def resource_details(self) -> Any:\n return pulumi.get(self, \"resource_details\")",
"def resource(self):\n return self.__resource",
"def Description(self):\n return self.Resource.Description",
"def resource(self):\n return self._resource",
"def __str__(self):\n return self.__resource;",
"def __str__(self):\n return \"Resource [Name: {rname}][Topology: {top}]\".format(rname=self.name, top=self.topology)",
"def __str__(self):\n\n return str(self.__resource);",
"def __repr__(self):\n return '<Resource(Name=\"%s\", Code=\"%s\", Rate=\"%f\", ID=\"%s\")>' % (\n self.Name, self.Code, self.Rate, self.ID)",
"def PrintResource(resource):\n print resource.resource_id.text, resource.GetResourceType()",
"def resourceid(self):",
"def print_resource_details(self):\n print(\"Resources remaining:\")\n #FIXME this is BROKEN. If resource definitions are needed, they must be externally linked.\n #I.e. read a resource file or read from redis or use resource manager\n for resource in resources:\n e_key = self.keynamehelper.create_key_name(\"resource\", resource['node_id'])\n print(\"hgetall(e_key): {}\".format(self.redis.hgetall(e_key)))\n print(\"-\" * 20)\n print(\"\\n\")",
"def get_resource(self) -> str:\n return self.get_value(\"resource\") or \"\"",
"def info(self, resource, id):\n return self.request('/' + resource + '/' + str(id))",
"def __repr__(self):\n return '<ResourcePart(Name=\"%s\", ID=\"%s\")>' % (\n self.Name, self.ID)",
"def metadata(self):\r\n return resources.Metadata(self)",
"def get_resource(self):\n return self.order_item.resource",
"def __repr__(self):\n desc = \"Reference Info Object:\"\n desc += \"File Name: {f}\".format(f=self.fileName)\n desc += \"Reference FASTA File: {f}\".format(f=self.refFastaFile)\n desc += \"Reference Suffix Array File: {f}\".format(\n f=self.refSawriterFile)\n desc += \"Description: {d}\".format(d=self.desc)\n if self.adapterGffFile is not None:\n desc += \"Adapter GFF file: {f}\".format(f=self.adapterGffFile)\n return desc",
"def resource_definition(self):\n return self._resource_definition",
"def info(self) -> None:\n loader = self.loader(self)\n obj = loader.get_object_from_aws(self.app.pargs.pk)\n self.app.render({'obj': obj}, template=self.info_template)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test get_type_for_key_path with a simple key path | def test_get_type_for_key_path_simple_path(test_schema):
assert get_type_for_key_path(test_schema, "Age") == "integer" | [
"def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"string\"\n )",
"def GetKeyByPath(self, key_path):",
"def test_key_type(self):\n self.assertEqual(redismod.key_type(\"key\"), \"A\")",
"def lookup_type(self, path):",
"def key_type(self) -> global___Type:",
"def test_key_typename():\n\n key = Key()\n\n check_type(key.cdata, \"dasi_key_t *\")",
"def test_type_path(self):\n self.assertEqual(type(storage._FileStorage__file_path), str)",
"def test_get_catalog_item_types_key(self):\n pass",
"def check_key(self, path: str) -> bool:",
"def test_constrained_by_key(self):\n obj = self.analyzer.get_object(['getProperty'])\n assert obj.params[1].type == 'K extends keyof T'",
"def GetSubkeyByPath(self, key_path):",
"def test_getKey_tmpfile(self):\n filename = self.mktemp()\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))",
"def test_get_transaction_types_key(self):\n pass",
"def load_key(self, type, keyid):\n pass",
"def map_key_input(key_type, is_boundable):\n\n if is_boundable:\n if key_type == \"spatial\":\n return \"SpatialKey\"\n elif key_type == \"spacetime\":\n return \"SpaceTimeKey\"\n else:\n raise Exception(\"Could not find key type that matches\", key_type)\n else:\n if key_type == \"spatial\":\n return \"ProjectedExtent\"\n elif key_type == \"spacetime\":\n return \"TemporalProjectedExtent\"\n else:\n raise Exception(\"Could not find key type that matches\", key_type)",
"def _load_key(client, entity_type, entity_id=None, parent_key=None):\n log('in load key')\n key = None\n if entity_id:\n log ('in load key if')\n key = client.key(entity_type, entity_id, parent=parent_key)\n else:\n # this will generate an ID\n key = client.key(entity_type)\n log('returning key')\n return key",
"def test_unit_get_by_path(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test get_type_for_key_path with a key path one level deep | def test_get_type_for_key_path_depth_one_level(test_schema):
assert (
get_type_for_key_path(test_schema, "EmploymentInformation.OriginalHireDate")
== "string"
) | [
"def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )",
"def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"integer\"",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def lookup_type(self, path):",
"def GetKeyByPath(self, key_path):",
"def GetSubkeyByPath(self, key_path):",
"def testRecurseKey(self):\n # Ensure with a depth of 1 we only return the root key.\n result = list(interface.RecurseKey(self._top_level_dict, depth=1))\n self.assertEquals(len(result), 1)\n\n # Trying again with depth limit of 2 this time.\n result = list(interface.RecurseKey(self._top_level_dict, depth=2))\n self.assertEquals(len(result), 3)\n\n # A depth of two should gives us root plus the two devices. Let's check.\n my_keys = []\n for unused_root, key, unused_value in result:\n my_keys.append(key)\n expected = set(['DeviceCache', '44-00-00-00-00-04', '44-00-00-00-00-02'])\n self.assertTrue(expected == set(my_keys))",
"def test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value",
"def _create_path(root, dict_type, path):\n for sub_path in path:\n if not isinstance(root.get(sub_path, None), dict):\n root[sub_path] = dict_type()\n\n root = root[sub_path]\n\n return root",
"def test_split_nested_class_from_key(self):\n part1, part2 = class_dependency.split_nested_class_from_key(\n 'pkg.name.class$nested')\n self.assertEqual(part1, 'pkg.name.class')\n self.assertEqual(part2, 'nested')",
"def test_constrained_by_key(self):\n obj = self.analyzer.get_object(['getProperty'])\n assert obj.params[1].type == 'K extends keyof T'",
"def __getitem__(self, key):\n path = self.path\n if self.path_is_string:\n path = [path]\n return path[key]",
"def check_key(self, path: str) -> bool:",
"def test_type_mapping(registry, item_type):\n with mappings_use_nested(False):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n assert 'properties' in mapping\n if item_type == 'TestingLinkTargetElasticSearch':\n assert mapping['properties']['reverse_es'].get('type', 'object') != 'nested' # should not occur here\n\n # check calculated properties on objects/arrays of objects are mapped correctly\n if item_type == 'TestingCalculatedProperties':\n assert mapping['properties']['nested']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['keyvalue']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['keyvalue']['type'] == 'text'",
"def path_lookup(data_obj, xj_path, create_dict_path=False):\n\n if not xj_path or xj_path == '.':\n return data_obj, True\n\n res = list(split(xj_path, '.', maxsplit=1))\n top_key = res[0]\n leftover = res[1] if len(res) > 1 else None\n if top_key == '*':\n return _full_sub_array(data_obj, leftover, create_dict_path)\n elif top_key.startswith('@'):\n return _single_array_element(data_obj, leftover, top_key,\n create_dict_path)\n else:\n val_type, top_key = _clean_key_type(top_key)\n top_key = unescape(top_key)\n if top_key in data_obj:\n value = data_obj[top_key]\n if val_type is not None and not isinstance(value, val_type):\n raise XJPathError(\n 'Key %s expects type \"%s\", but found value type is \"%s\"' %\n (top_key, val_type.__name__, type(value).__name__))\n if leftover:\n return path_lookup(value, leftover, create_dict_path)\n else:\n return value, True\n else:\n if val_type is not None:\n if not isinstance(data_obj, dict):\n raise XJPathError('Accessed object must be a dict type '\n 'for the key: \"%s\"' % top_key)\n if create_dict_path:\n data_obj[top_key] = val_type()\n else:\n return None, False\n if leftover:\n return path_lookup(data_obj[top_key], leftover,\n create_dict_path)\n else:\n return data_obj[top_key], True\n return None, False",
"def test_type_mapping_nested(registry):\n with mappings_use_nested(True):\n mapping = type_mapping(registry[TYPES], 'TestingLinkTargetElasticSearch')\n assert mapping\n assert 'properties' in mapping\n # if type is defined on this field, it should beg object, NOT nested since it is not enabled on this field\n assert mapping['properties']['reverse_es'].get('type', 'object') == 'object'",
"def test_key_type(self):\n self.assertEqual(redismod.key_type(\"key\"), \"A\")",
"def test_key_typename():\n\n key = Key()\n\n check_type(key.cdata, \"dasi_key_t *\")",
"def get_key_recursive(key, config):\n if not isinstance(key, list):\n key = key.split(\"/\") # subdict indexing split using slash\n assert key[0] in config, f\"missing key '{key[0]}' in metadata dictionary: {config}\"\n val = config[key[0]]\n if isinstance(val, (dict, collections.OrderedDict)):\n assert len(key) > 1, \"missing keys to index metadata subdictionaries\"\n return get_key_recursive(key[1:], val)\n return int(val)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test get_type_for_key_path with a multi-level key path | def test_get_type_for_key_path_multi_level(test_schema):
assert (
get_type_for_key_path(test_schema, "EmploymentInformation.Beneficiary.Name")
== "string"
) | [
"def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"integer\"",
"def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"string\"\n )",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def GetKeyByPath(self, key_path):",
"def lookup_type(self, path):",
"def GetSubkeyByPath(self, key_path):",
"def test_add_keys_multiple_times(self):\n path = _path.Path.from_str(\"RootOper.Foo(*)\")\n with self.assertRaisesRegex(\n ValueError, \"Path element already has key information\"):\n path(4)",
"def test_get_catalog_item_types_key(self):\n pass",
"def test_key_type(self):\n self.assertEqual(redismod.key_type(\"key\"), \"A\")",
"def test_constrained_by_key(self):\n obj = self.analyzer.get_object(['getProperty'])\n assert obj.params[1].type == 'K extends keyof T'",
"def _create_path(root, dict_type, path):\n for sub_path in path:\n if not isinstance(root.get(sub_path, None), dict):\n root[sub_path] = dict_type()\n\n root = root[sub_path]\n\n return root",
"def __getitem__(self, key):\n path = self.path\n if self.path_is_string:\n path = [path]\n return path[key]",
"def testRecurseKey(self):\n # Ensure with a depth of 1 we only return the root key.\n result = list(interface.RecurseKey(self._top_level_dict, depth=1))\n self.assertEquals(len(result), 1)\n\n # Trying again with depth limit of 2 this time.\n result = list(interface.RecurseKey(self._top_level_dict, depth=2))\n self.assertEquals(len(result), 3)\n\n # A depth of two should gives us root plus the two devices. Let's check.\n my_keys = []\n for unused_root, key, unused_value in result:\n my_keys.append(key)\n expected = set(['DeviceCache', '44-00-00-00-00-04', '44-00-00-00-00-02'])\n self.assertTrue(expected == set(my_keys))",
"def check_key(self, path: str) -> bool:",
"def _is_generic_key(key):\n for prefix in [\n \"graph_rewriter_config\",\n \"model\",\n \"train_input_config\",\n \"train_config\",\n \"eval_config\"]:\n if key.startswith(prefix + \".\"):\n return True\n return False",
"def test_key_typename():\n\n key = Key()\n\n check_type(key.cdata, \"dasi_key_t *\")",
"def test_type_mapping(registry, item_type):\n with mappings_use_nested(False):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n assert 'properties' in mapping\n if item_type == 'TestingLinkTargetElasticSearch':\n assert mapping['properties']['reverse_es'].get('type', 'object') != 'nested' # should not occur here\n\n # check calculated properties on objects/arrays of objects are mapped correctly\n if item_type == 'TestingCalculatedProperties':\n assert mapping['properties']['nested']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['keyvalue']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['keyvalue']['type'] == 'text'",
"def test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value",
"def key_type(self) -> global___Type:"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test get_type_for_key_path with an invalid key path | def test_get_type_for_key_path_invalid_key_path(test_schema):
assert get_type_for_key_path(test_schema, "foo.bar") == None | [
"def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"integer\"",
"def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )",
"def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"string\"\n )",
"def test_key_type(self):\n self.assertEqual(redismod.key_type(\"key\"), \"A\")",
"def check_key(self, path: str) -> bool:",
"def test_get_return_template_with_invalid_type(self):\n with self.assertRaises(KeyError):\n get_return_template('USER')",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')",
"def GetKeyByPath(self, key_path):",
"def test_type_path(self):\n self.assertEqual(type(storage._FileStorage__file_path), str)",
"def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]",
"def lookup_type(self, path):",
"def test_invalid_key_format_type(self):\n kwargs = {'key_format_type': 'invalid'}\n self.assertRaisesRegex(\n TypeError,\n \"Key format type must be a KeyFormatType enumeration.\",\n payloads.GetRequestPayload,\n **kwargs\n )\n\n args = (payloads.GetRequestPayload(), 'key_format_type', 'invalid')\n self.assertRaisesRegex(\n TypeError,\n \"Key format type must be a KeyFormatType enumeration.\",\n setattr,\n *args\n )",
"def test_validate_with_invalid_key_format_type(self):\n key_format_type = \"invalid\"\n kwargs = {'key_format_type': key_format_type}\n\n self.assertRaisesRegex(\n TypeError, \"invalid key format type\", Digest, **kwargs)",
"def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n get_item(\"C\")\n\n assert \"C\" in str(exc.value)",
"def test_key_typename():\n\n key = Key()\n\n check_type(key.cdata, \"dasi_key_t *\")",
"def test_get_catalog_item_types_key(self):\n pass",
"def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})",
"def test_add_keys_multiple_times(self):\n path = _path.Path.from_str(\"RootOper.Foo(*)\")\n with self.assertRaisesRegex(\n ValueError, \"Path element already has key information\"):\n path(4)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Evaluate and apply formatting on the template, and apply any art if provided. Any additional parameters are passed as extra variables to the template. The extra variables take priority when there are conflicting variable names. | def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str:
variables = self.__dict__
variables.update(kwargs)
template = CustomFormats().format(template, **variables)
if art:
art = art.format(nfo=template)
template = art
for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template):
# TODO: This if check is quite yucky, look into alternative options.
# Ideally a custom format spec would be great.
template = template.replace(
m.group(0),
m.group(2) if int(m.group(1)) else ""
)
template = "\n".join(map(str.rstrip, template.splitlines(keepends=False)))
return template | [
"def render( text, options, processed = None ):\n output = unicode(text)\n expr = re.compile( '(\\[+([^\\[\\]]+)\\]\\]?)' )\n results = expr.findall( output )\n curr_date = datetime.datetime.now()\n options_re = re.compile('(\\w+)\\(?([^\\)]+)?\\)?')\n \n if ( processed == None ):\n processed = []\n \n for repl, key in results:\n # its possible to get multiple items for processing\n if ( repl in processed ):\n continue\n \n # record the repl value as being processed\n processed.append(repl)\n \n # replace templated templates\n if ( repl.startswith('[[') and repl.endswith(']]') ):\n output = output.replace( repl, '[%s]' % key )\n continue\n \n # determine the main key and its options\n splt = key.split('::')\n key = splt[0]\n prefs = splt[1:]\n value = None\n \n # use the inputed options\n if ( key in options ):\n # extract the value\n value = options[key]\n \n # format a float\n if ( type(value) in (float, int) ):\n if ( prefs ):\n value = prefs[0] % value\n else:\n value = str(value)\n \n # convert date time values\n elif ( type(value) in (datetime.datetime,\n datetime.date,\n datetime.time) ):\n if ( not prefs ):\n date_format = '%m/%d/%y'\n else:\n date_format = prefs[0]\n prefs = prefs[1:]\n \n value = value.strftime(str(date_format))\n \n else:\n value = render(options[key], options, processed)\n \n # look for the built-in options\n elif ( key == 'date' ):\n value = curr_date\n \n if ( not prefs ):\n date_format = '%m/%d/%y'\n else:\n date_format = prefs[0]\n prefs = prefs[1:]\n \n value = value.strftime(str(date_format))\n \n # otherwise, continue\n else:\n continue\n \n # apply the prefs to the value\n if ( value and prefs ):\n \n for pref in prefs:\n result = options_re.match(pref)\n pref, opts = result.groups()\n \n if ( opts ):\n opts = [opt.strip() for opt in opts.split(',')]\n else:\n opts = []\n \n if ( 'lower' == pref ):\n value = value.lower()\n elif ( 'upper' == pref ):\n value = value.upper()\n elif ( 'upper_first' == pref ):\n value = value[0].upper() + value[1:]\n elif ( 'lower_first' == pref ):\n value = value[0].lower() + value[1:]\n elif ( 'camelHump' == pref ):\n value = camelHump(value)\n elif ( 'underscore' == pref ):\n value = underscore(value)\n elif ( 'capitalize' == pref ):\n value = capitalize(value)\n elif ( 'pluralize' == pref ):\n value = pluralize(value)\n elif ( 'words' == pref ):\n value = ' '.join(words(value))\n elif ( 'pretty' == pref ):\n value = pretty(value)\n \n elif ( 'replace' == pref ):\n if ( len(opts) == 2 ):\n value = value.replace(opts[0], opts[1])\n else:\n logger.warning('Invalid options for replace: %s', \n ', '.join(opts))\n \n elif ( 'slice' == pref ):\n if ( len(opts) == 2 ):\n value = value[int(opts[0]):int(opts[1])]\n else:\n logger.warning('Invalid options for slice: %s',\n ', '.join(opts))\n \n elif ( 'lstrip' == pref ):\n if ( not opts ):\n value = value.lstrip()\n else:\n for key in opts:\n if ( value.startswith(key) ):\n value = value[len(key):]\n \n elif ( 'rstrip' == pref ):\n if ( not opts ):\n value = value.rstrip()\n else:\n for key in opts:\n if ( value.endswith(key) ):\n value = value[:-len(key)]\n \n output = output.replace(repl, value)\n \n return output",
"def process_with_template(contents, params, fields):\n try:\n import jinja2\n except ImportError:\n printerr(\"\"\"Jinja2 is not installed: can't use template!\"\"\")\n sys.exit(1)\n fields['root_element'] = params['root_element']\n fields['contents'] = contents\n jinja2env = jinja2.Environment(loader=jinja2.FileSystemLoader('/'),\n trim_blocks=True)\n t = jinja2env.get_template(params['template_filename'])\n return t.render(data=fields)",
"def render(tmpl_name, **kwargs):",
"def part_render(self, attr, *a, **kw):\r\n style = kw.get('style', 'html')\r\n template = self.template(style)\r\n dt = template.get_def(attr)\r\n return unsafe(dt.render(thing = self, *a, **kw))",
"def insert_evaluate_variables(text, var_dict):\n if isinstance(text, list):\n text.insert(0, '{% load quest_render_tags %}')\n rndr_string = '\\n'.join(text)\n else:\n rndr_string = r'{% load quest_render_tags %} ' + text\n\n var_dict_rendered = {}\n for key, values in var_dict.iteritems():\n var_dict_rendered[key] = values[1]\n\n tmplte = Template(rndr_string)\n cntxt = Context(var_dict_rendered)\n return tmplte.render(cntxt)",
"def _template_formatting(field, inputs, inputs_dict_st):\n from .specs import MultiInputObj, MultiOutputFile\n\n # if a template is a function it has to be run first with the inputs as the only arg\n template = field.metadata[\"output_file_template\"]\n if callable(template):\n template = template(inputs)\n\n # as default, we assume that keep_extension is True\n keep_extension = field.metadata.get(\"keep_extension\", True)\n\n inp_fields = re.findall(r\"{\\w+}\", template)\n inp_fields_fl = re.findall(r\"{\\w+:[0-9.]+f}\", template)\n inp_fields += [re.sub(\":[0-9.]+f\", \"\", el) for el in inp_fields_fl]\n if len(inp_fields) == 0:\n return template\n\n val_dict = {}\n file_template = None\n\n for fld in inp_fields:\n fld_name = fld[1:-1] # extracting the name form {field_name}\n if fld_name not in inputs_dict_st:\n raise AttributeError(f\"{fld_name} is not provided in the input\")\n fld_value = inputs_dict_st[fld_name]\n if fld_value is attr.NOTHING:\n # if value is NOTHING, nothing should be added to the command\n return attr.NOTHING\n else:\n # checking for fields that can be treated as a file:\n # have type File, or value that is path like (including str with extensions)\n if isinstance(fld_value, os.PathLike) or (\n isinstance(fld_value, str) and \".\" in fld_value\n ):\n if file_template:\n raise Exception(\n f\"can't have multiple paths in {field.name} template,\"\n f\" but {template} provided\"\n )\n else:\n file_template = (fld_name, fld_value)\n else:\n val_dict[fld_name] = fld_value\n\n # if field is MultiOutputFile and some elements from val_dict are lists,\n # each element of the list should be used separately in the template\n # and return a list with formatted values\n if field.type is MultiOutputFile and any(\n [isinstance(el, (list, MultiInputObj)) for el in val_dict.values()]\n ):\n # all fields that are lists\n keys_list = [\n k for k, el in val_dict.items() if isinstance(el, (list, MultiInputObj))\n ]\n if any(\n [len(val_dict[key]) != len(val_dict[keys_list[0]]) for key in keys_list[1:]]\n ):\n raise Exception(\n f\"all fields used in {field.name} template have to have the same length\"\n f\" or be a single value\"\n )\n formatted_value = []\n for ii in range(len(val_dict[keys_list[0]])):\n val_dict_el = copy(val_dict)\n # updating values to a single element from the list\n for key in keys_list:\n val_dict_el[key] = val_dict[key][ii]\n\n formatted_value.append(\n _element_formatting(\n template, val_dict_el, file_template, keep_extension=keep_extension\n )\n )\n else:\n formatted_value = _element_formatting(\n template, val_dict, file_template, keep_extension=keep_extension\n )\n return formatted_value",
"def _element_formatting(template, values_template_dict, file_template, keep_extension):\n if file_template:\n fld_name_file, fld_value_file = file_template\n # splitting the filename for name and extension,\n # the final value used for formatting depends on the template and keep_extension flag\n name, *ext = Path(fld_value_file).name.split(\".\", maxsplit=1)\n filename = str(Path(fld_value_file).parent / name)\n # updating values_template_dic with the name of file\n values_template_dict[fld_name_file] = filename\n # if keep_extension is False, the extensions are removed\n if keep_extension is False:\n ext = []\n else:\n ext = []\n\n # if file_template is at the end of the template, the simplest formatting should work\n if file_template and template.endswith(f\"{{{fld_name_file}}}\"):\n # recreating fld_value with the updated extension\n values_template_dict[fld_name_file] = \".\".join([filename] + ext)\n formatted_value = template.format(**values_template_dict)\n # file_template provided, but the template doesn't have its own extension\n elif file_template and \".\" not in template:\n # if the fld_value_file has extension, it will be moved to the end\n formatted_value = \".\".join([template.format(**values_template_dict)] + ext)\n # template has its own extension or no file_template provided\n # the simplest formatting, if file_template is provided it's used without the extension\n else:\n formatted_value = template.format(**values_template_dict)\n return formatted_value",
"def apply_to(self, template):\n pass",
"def _substitute(template, fuzzer, benchmark):\n return template.format(fuzzer=fuzzer, benchmark=benchmark)",
"def render_string(self, template: str, **vars) -> str:",
"def format(self, rq, *args):\n return {\n 'title':'Okasha simple format templates',\n 'key1':'val1','key2':'val2','args':'/'.join(args)\n }",
"def BuildDescriptiveText(template, part, properties):\n def replacer(match):\n def appropriate_format(pval):\n if fmt:\n return fmt\n # Use '%f' for floats to cut off trailing zeroes.\n if isinstance(pval, float):\n return \"%f\"\n return \"%s\"\n\n def normalized(prop):\n x = prop.normalized[0]\n return x, appropriate_format(x)\n\n def asgiven(prop):\n x, u = prop.value\n return (x, u), \"%s%%s\" % appropriate_format(x)\n\n def decimal_comma(prop):\n return (\"%f\" % prop).replace(\".\", \",\")\n\n MODIFIER_MAP = {'normalized': normalized,\n 'asgiven': asgiven,\n }\n\n # Grep elements from the match object.\n field = match.groupdict().get('property')\n fmt = match.groupdict().get('format')\n modifier = MODIFIER_MAP.get(match.groupdict().get('modifier'), normalized)\n expr = match.groupdict(\"prop\").get('expr')\n # We expect to find the named field either in 'properties'\n # (first) or in 'part'. Otherwise we flag an error by\n # replacing '[field?]'\n if field in properties:\n prop = properties.get(field)\n elif field in part:\n prop = part.get(field)\n else:\n return \"[%s?]\" % field\n\n if is_pq(prop):\n prop, fmt = modifier(prop)\n pval = eval(expr)\n fmt = appropriate_format(pval)\n replacement = fmt % pval\n if not replacement or pval is None:\n replacement = \"?\"\n\n return replacement\n\n # Sloppily match the syntax [modifier(property)!expr|format] by a\n # more-or-less simple regular expression.\n return re.sub(r\"\\[\"\n r\"((?P<modifier>\\w+)\\()?\"\n r\"(?P<property>\\w+)\"\n r\"\\)?\"\n r\"(!(?P<expr>[^|]+))?\"\n r\"(\\|(?P<format>[^\\]]*))?\"\n r\"\\]\",\n replacer, template)",
"def format(*args, **kwargs):\n\n pass",
"def render(self, template, *args, **kwargs):\n self._render(template, sys.stdout, *args, **kwargs)",
"def render(template, format='html', **context):\n content_type(mimetypes.guess_type('.'+format)[0])\n template = environments[format].get_template('%s.jinja' % template)\n \n add_to_context(context,helpers)\n \n return template.render(**context)",
"def render(data_dict, template=None):",
"def template(self, data=None, settings=None):\r\n if settings is None:\r\n settings = {}\r\n ts = _.templateSettings\r\n _.defaults(ts, self.templateSettings)\r\n _.extend(settings, ts)\r\n\r\n # settings = {\r\n # \"interpolate\": self.templateSettings.get('interpolate'),\r\n # \"evaluate\": self.templateSettings.get('evaluate'),\r\n # \"escape\": self.templateSettings.get('escape')\r\n # }\r\n\r\n _.extend(settings, {\r\n \"escaper\": r\"\\\\|'|\\r|\\n|\\t|\\u2028|\\u2029\",\r\n \"unescaper\": r\"\\\\(\\\\|'|r|n|t|u2028|u2029)\"\r\n })\r\n\r\n src = self.obj\r\n #src = re.sub('\"', r'\\\"', src)\r\n #src = re.sub(r'\\\\', r\"\\\\\", src)\r\n ns = self.Namespace()\r\n ns.indent_level = 1\r\n\r\n def unescape(code):\r\n def unescapes(matchobj):\r\n a = re.sub(\"^[\\'\\\"]|[\\'\\\"]$\", \"\", (\"%r\" % matchobj.group(1)))\r\n # Python doesn't accept \\n as a key\r\n if a == '\\n':\r\n a = \"bn\"\r\n if a == '\\r':\r\n a = \"br\"\r\n if a == '\\t':\r\n a = \"bt\"\r\n if a == '\\u2028':\r\n a = 'bu2028'\r\n if a == '\\u2029':\r\n a = 'bu2029'\r\n return self.escapes[a]\r\n return re.sub(settings.get('unescaper'), unescapes, code)\r\n\r\n def escapes(matchobj):\r\n a = matchobj.group(0)\r\n # Python doesn't accept \\n as a key\r\n if a == '\\n':\r\n a = \"bn\"\r\n if a == '\\r':\r\n a = \"br\"\r\n if a == '\\t':\r\n a = \"bt\"\r\n if a == '\\u2028':\r\n a = 'bu2028'\r\n if a == '\\u2029':\r\n a = 'bu2029'\r\n return '\\\\' + self.escapes[a]\r\n\r\n def indent(n=None):\r\n if n is not None:\r\n ns.indent_level += n\r\n return \" \" * ns.indent_level\r\n\r\n def interpolate(matchobj):\r\n if getattr(str, 'decode', False):\r\n key = (matchobj.group(1).decode('string-escape')).strip()\r\n else:\r\n key = (bytes(matchobj.group(1), \"utf-8\").decode()).strip()\r\n return \"' + str(\" + unescape(key) + \" or '') + '\"\r\n\r\n def evaluate(matchobj):\r\n if getattr(str, 'decode', False):\r\n code = (matchobj.group(1).decode('string-escape')).strip()\r\n else:\r\n code = (bytes(matchobj.group(1), \"utf-8\").decode()).strip()\r\n if code.startswith(\"end\"):\r\n return \"')\\n\" + indent(-1) + \"ns.__p += ('\"\r\n elif code.endswith(':'):\r\n return \"')\\n\" + indent() + unescape(code) + \\\r\n \"\\n\" + indent(+1) + \"ns.__p += ('\"\r\n else:\r\n return \"')\\n\" + indent() + unescape(code) + \\\r\n \"\\n\" + indent() + \"ns.__p += ('\"\r\n\r\n def escape(matchobj):\r\n if getattr(str, 'decode', False):\r\n key = (matchobj.group(1).decode('string-escape')).strip()\r\n else:\r\n key = (bytes(matchobj.group(1), \"utf-8\").decode()).strip()\r\n return \"' + _.escape(str(\" + unescape(key) + \" or '')) + '\"\r\n\r\n source = indent() + 'class closure(object):\\n pass' + \\\r\n ' # for full closure support\\n'\r\n source += indent() + 'ns = closure()\\n'\r\n source += indent() + \"ns.__p = ''\\n\"\r\n #src = re.sub(\"^[\\'\\\"]|[\\'\\\"]$\", \"\", (\"%r\" % src))\r\n src = re.sub(settings.get(\"escaper\"), escapes, src)\r\n source += indent() + \"ns.__p += ('\" + \\\r\n re.sub(settings.get('escape'), escape, src) + \"')\\n\"\r\n source = re.sub(settings.get('interpolate'), interpolate, source)\r\n source = re.sub(settings.get('evaluate'), evaluate, source)\r\n\r\n if getattr(str, 'decode', False):\r\n source += indent() + 'return ns.__p.decode(\"string_escape\")\\n'\r\n else:\r\n source += indent() + 'return bytes(ns.__p, \"utf-8\").decode()\\n'\r\n\r\n f = self.create_function(settings.get(\"variable\")\r\n or \"obj=None\", source)\r\n\r\n if data is not None:\r\n return f(data)\r\n return f",
"def render_template(*args, **kwargs):\r\n params = {'cache_buster': cache_buster, 'user': {}, 'user_json': {}, 'PROD': PRODUCTION,\r\n 'static_route': 'http://cdn1.pythonhackers.com'}\r\n params.update(**kwargs)\r\n\r\n return template_render(*args, **params)",
"def _preprocess_template(template_string):\n\n # template_colors[i] = the color to replace with the food color in that\n # card's location in the template sheet\n template_colors = ('#ff0000', '#00ff00', '#0000ff',\n '#800000', '#008000', '#000080')\n\n for index, color in enumerate(template_colors):\n # {{ is escaped {\n template_string = re.sub(color, '{{{{ colors.{0} }}}}'.format(index), template_string)\n\n template_string = template_string.replace('|', '\"')\n\n return template_string"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an IMDB ID from either the media's global tags, or the config. Since IMDB IDs are required for this project, it will interactively prompt the user for one if none is found. | def get_imdb_id(self, imdb_id: Any) -> str:
if not imdb_id:
general_track = self.media_info.general_tracks[0].to_data()
imdb_id = general_track.get("imdb")
if not imdb_id:
print("No IMDB ID was provided but is required...")
while not imdb_id or not isinstance(imdb_id, str):
user_id = input("IMDB ID (e.g., 'tt0487831'): ")
if not self.IMDB_ID_T.match(user_id):
print(f"The provided IMDB ID {user_id!r} is not valid...")
print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').")
else:
imdb_id = user_id
return imdb_id | [
"def alternative_media_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"alternative_media_id\")",
"def imdb_id(title):\n pass",
"def get_mediatype_id(self, name: str) -> Optional[str]:\n result = self.conn.mediatype.get(filter={'name': name.strip()})\n\n if len(result) < 1:\n raise Exception(f\"No such media for {name} found, check your configuration\")\n elif len(result) > 1:\n raise Exception(f\"Ambiguous media '{name}' found, {len(result)} different medias\")\n\n if result:\n mediatypeid = result[0]['mediatypeid']\n else:\n mediatypeid = None\n\n return mediatypeid",
"def get_media_id_by_tag(self, tag):\n\n if self.login_status:\n try:\n if tag.startswith(\"l:\"):\n tag = tag.replace(\"l:\", \"\")\n self.logger.info(f\"Get Media by location: {tag}\")\n url_location = self.url_location % (tag)\n r = self.s.get(url_location)\n all_data = json.loads(r.text)\n self.media_by_tag = list(\n all_data[\"graphql\"][\"location\"][\"edge_location_to_media\"][\n \"edges\"\n ]\n )\n\n else:\n self.logger.debug(f\"Get Media by tag: {tag}\")\n url_tag = self.url_tag % (tag)\n r = self.s.get(url_tag)\n all_data = json.loads(r.text)\n self.media_by_tag = list(\n all_data[\"graphql\"][\"hashtag\"][\"edge_hashtag_to_media\"][\n \"edges\"\n ]\n )\n except Exception as exc:\n self.media_by_tag = []\n self.logger.warning(\"Except on get_media!\")\n self.logger.exception(exc)",
"def select_instant_messaging_id(self, cnx, im_name, logger=None):\n found = None\n cursor = cnx.cursor()\n query = \"SELECT id \" \\\n \"FROM instant_messaging \" \\\n \"WHERE name = %s\"\n arguments = [im_name]\n cursor.execute(query, arguments)\n\n row = cursor.fetchone()\n\n if row:\n found = row[0]\n else:\n if logger:\n logger.error(\"the instant messaging \" + im_name + \" does not exist\")\n\n cursor.close()\n return found",
"def ami_id(self) -> Optional[str]:\n return pulumi.get(self, \"ami_id\")",
"def _get_device_id_from_environment() -> str:\n\n return os.environ[\"GOOGLE_MUSIC_DEVICE_ID\"]",
"def id(self):\n return self.settings['your_botid']",
"def read_id():\n try:\n with nfc_open('usb') as tag:\n for record in tag.ndef.records:\n if record.type == 'urn:nfc:wkt:T' and record.text.startswith('spotify:'):\n return SpotifyId.from_string(record.text)\n except NfcError:\n pass\n return None",
"def get_app_id():\n return os.environ.get('YUMMLY_APP_ID')",
"def get_media_id(media_url):\n split_url = media_url.split(\"/\")\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/file.png\n if split_url[-2] == \"media\":\n return split_url[-1]\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png\n else:\n #This is required for now due to the SDK parsing out the `/`s\n return \"%2F\".join(split_url[-3:])",
"def _parse_imdb_id(imdb_id):\n if isinstance(imdb_id, int):\n imdb_id = \"{}\".format(imdb_id)\n if imdb_id.startswith(\"tt\"):\n imdb_id = imdb_id[2:]\n return imdb_id",
"def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get(\"tmdb\")\n if not tmdb_id:\n print(\"Warning: No TMDB ID was provided...\")\n return None\n if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n print(f\"The provided TMDB ID {tmdb_id!r} is not valid...\")\n print(\"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\")\n raise ValueError(\"Invalid TMDB ID\")\n return tmdb_id",
"def the_tvdb_dot_com_id(title):\n pass",
"def get_id(url):\n if \"gfycat\" in url:\n match = re.search(r'^https?:\\/\\/(?:www.)?gfycat.com\\/ifr\\/([A-Za-z0-9\\-_]+)(?:\\/)?(?:\\?.*)?$', url)\n if match:\n return match.group(1)\n return None",
"def get_id(conf_name: str=CONFIG_FILE) -> Optional[int]:\n with open(conf_name, 'r') as fobj:\n data = json.load(fobj)\n\n uid = data.get('id')\n\n assert uid is None or isinstance(uid, int), \\\n 'The user id must be an integer if it exists'\n\n return uid",
"def get_guid(config):\n try:\n return config.get(section, \"ComputerGUID\")\n except ConfigParserError:\n return None",
"def spotify_id_from_token(access_token: str) -> Optional[str]:\n if access_token is None:\n return None\n headers = {\"Authorization\": \"Bearer {}\".format(access_token)}\n response = requests.post(\"https://api.spotify.com/v1/me\", headers=headers)\n if response.status_code != 200:\n return None\n user = response.json()\n if \"id\" not in user:\n return None\n return user[\"id\"]",
"def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:\n try:\n # The type on postpath seems incorrect in Twisted 21.2.0.\n postpath: List[bytes] = request.postpath # type: ignore\n assert postpath\n\n # This allows users to append e.g. /test.png to the URL. Useful for\n # clients that parse the URL to see content type.\n server_name_bytes, media_id_bytes = postpath[:2]\n server_name = server_name_bytes.decode(\"utf-8\")\n media_id = media_id_bytes.decode(\"utf8\")\n\n # Validate the server name, raising if invalid\n parse_and_validate_server_name(server_name)\n\n file_name = None\n if len(postpath) > 2:\n try:\n file_name = urllib.parse.unquote(postpath[-1].decode(\"utf-8\"))\n except UnicodeDecodeError:\n pass\n return server_name, media_id, file_name\n except Exception:\n raise SynapseError(\n 404, \"Invalid media id token %r\" % (request.postpath,), Codes.UNKNOWN\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a TMDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. | def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:
if not tmdb_id:
general_track = self.media_info.general_tracks[0].to_data()
tmdb_id = general_track.get("tmdb")
if not tmdb_id:
print("Warning: No TMDB ID was provided...")
return None
if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):
print(f"The provided TMDB ID {tmdb_id!r} is not valid...")
print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').")
raise ValueError("Invalid TMDB ID")
return tmdb_id | [
"def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:\n if not tvdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tvdb_id = general_track.get(\"tvdb\")\n if not tvdb_id:\n print(\"Warning: No TVDB ID was provided...\")\n return None\n if isinstance(tvdb_id, int):\n tvdb_id = str(tvdb_id)\n if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):\n print(f\"The provided TVDB ID {tvdb_id!r} is not valid...\")\n print(\"Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').\")\n raise ValueError(\"Invalid TVDB ID\")\n return int(tvdb_id)",
"def get_guid(config):\n try:\n return config.get(section, \"ComputerGUID\")\n except ConfigParserError:\n return None",
"def get_mediatype_id(self, name: str) -> Optional[str]:\n result = self.conn.mediatype.get(filter={'name': name.strip()})\n\n if len(result) < 1:\n raise Exception(f\"No such media for {name} found, check your configuration\")\n elif len(result) > 1:\n raise Exception(f\"Ambiguous media '{name}' found, {len(result)} different medias\")\n\n if result:\n mediatypeid = result[0]['mediatypeid']\n else:\n mediatypeid = None\n\n return mediatypeid",
"def _get_numeric_record_id(global_id):\n if re.match(r'[a-zA-Z]{2}\\d+$', str(global_id)) is not None:\n return int(global_id[2:])\n elif re.match(r'\\d+$', str(global_id)) is not None:\n return int(global_id)\n else:\n raise ValueError('{} is not a valid global ID'.format(global_id))",
"def get_imdb_id(self, imdb_id: Any) -> str:\n if not imdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n imdb_id = general_track.get(\"imdb\")\n if not imdb_id:\n print(\"No IMDB ID was provided but is required...\")\n while not imdb_id or not isinstance(imdb_id, str):\n user_id = input(\"IMDB ID (e.g., 'tt0487831'): \")\n if not self.IMDB_ID_T.match(user_id):\n print(f\"The provided IMDB ID {user_id!r} is not valid...\")\n print(\"Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').\")\n else:\n imdb_id = user_id\n return imdb_id",
"def get_id(conf_name: str=CONFIG_FILE) -> Optional[int]:\n with open(conf_name, 'r') as fobj:\n data = json.load(fobj)\n\n uid = data.get('id')\n\n assert uid is None or isinstance(uid, int), \\\n 'The user id must be an integer if it exists'\n\n return uid",
"def url2id(url):\n params = parse_qs(urlparse(url).query)\n if 'multiverseid' in params:\n return maybeInt(params['multiverseid'][0])\n else:\n return None",
"def getMBID(self,song):\n if not main.local['mbidsupport']:\n return None\n try:\n if song['location'] is None:\n return None\n except KeyError:\n return None\n\n try:\n log.debug(\"Reading ID3 tag...\")\n tmbid = mbid.getMBID(song['location'])\n if tmbid is not None:\n log.verb(\"Found MBID %s for \\\"%s\\\"\" % (tmbid,song['name']))\n return tmbid\n except Exception, err:\n log.error(\"An error occurred checking \\\"%s\\\" for an MBID [%s:%s]\" % (song['name'],sys.exc_info()[0],err))\n return None",
"def _parse_imdb_id(imdb_id):\n if isinstance(imdb_id, int):\n imdb_id = \"{}\".format(imdb_id)\n if imdb_id.startswith(\"tt\"):\n imdb_id = imdb_id[2:]\n return imdb_id",
"def get_device_id(self) -> str:\n return Config.get('device_id')",
"def read_id():\n try:\n with nfc_open('usb') as tag:\n for record in tag.ndef.records:\n if record.type == 'urn:nfc:wkt:T' and record.text.startswith('spotify:'):\n return SpotifyId.from_string(record.text)\n except NfcError:\n pass\n return None",
"def parse_id_arg (args, required=True):\n uid = args.get('id')\n if (uid is not None):\n try:\n num = int(uid)\n if (num > 0):\n return num\n except ValueError:\n pass # drop through to error\n\n if (not required):\n return None\n else:\n errMsg = \"A record ID must be specified, via the 'id' argument\"\n current_app.logger.error(errMsg)\n raise exceptions.RequestException(errMsg)",
"def configId(self) -> Optional[str]:\n return cast(Optional[str], self._properties.get('configId'))",
"def get_router_id(config):\n rid = router_id_cisco_regex.search(config)\n if rid is not None:\n return rid.group('address')\n else:\n return None",
"def get_frigate_instance_id(config: dict[str, Any]) -> str | None:\n\n # Use the MQTT client_id as a way to separate the frigate instances, rather\n # than just using the config_entry_id, in order to make URLs maximally\n # relatable/findable by the user. The MQTT client_id value is configured by\n # the user in their Frigate configuration and will be unique per Frigate\n # instance (enforced in practice on the Frigate/MQTT side).\n return cast(Optional[str], config.get(ATTR_MQTT, {}).get(ATTR_CLIENT_ID))",
"def _GetIdFromInstanceDirStr(instance_dir):\n match = _RE_LOCAL_INSTANCE_ID.match(instance_dir)\n if match:\n return match.group(\"ins_id\")\n\n # To support the device which is not created by acloud.\n if os.path.expanduser(\"~\") in instance_dir:\n return \"1\"\n\n return None",
"def alternative_media_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"alternative_media_id\")",
"def read_album_id(self):\n\n content = self.id_file.load_file()\n\n if Album.album_string in content:\n try:\n self.id = int(content[len(Album.album_string)-content.find(Album.album_string):])\n except Exception as e:\n call_error('Could not read album id from existing file...Setting to Miscellaneous', e, 'soft')\n self.id = Album.standard_album\n\n return self.id",
"def get_dataset_id(thing: object) -> t.DatasetId:\n if isinstance(thing, int):\n return t.DatasetId(thing)\n try:\n int_id = int(thing) # type: ignore\n return t.DatasetId(int_id)\n except ValueError:\n raise err.InvalidDatasetError(id=str(thing))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a TVDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. | def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:
if not tvdb_id:
general_track = self.media_info.general_tracks[0].to_data()
tvdb_id = general_track.get("tvdb")
if not tvdb_id:
print("Warning: No TVDB ID was provided...")
return None
if isinstance(tvdb_id, int):
tvdb_id = str(tvdb_id)
if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):
print(f"The provided TVDB ID {tvdb_id!r} is not valid...")
print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').")
raise ValueError("Invalid TVDB ID")
return int(tvdb_id) | [
"def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get(\"tmdb\")\n if not tmdb_id:\n print(\"Warning: No TMDB ID was provided...\")\n return None\n if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n print(f\"The provided TMDB ID {tmdb_id!r} is not valid...\")\n print(\"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\")\n raise ValueError(\"Invalid TMDB ID\")\n return tmdb_id",
"def get_guid(config):\n try:\n return config.get(section, \"ComputerGUID\")\n except ConfigParserError:\n return None",
"def load_volume_id( config, volume_name ):\n volume_cert = load_volume_cert( config, volume_name )\n if volume_cert is None:\n return None \n\n return volume_cert.volume_id",
"def get_frigate_instance_id(config: dict[str, Any]) -> str | None:\n\n # Use the MQTT client_id as a way to separate the frigate instances, rather\n # than just using the config_entry_id, in order to make URLs maximally\n # relatable/findable by the user. The MQTT client_id value is configured by\n # the user in their Frigate configuration and will be unique per Frigate\n # instance (enforced in practice on the Frigate/MQTT side).\n return cast(Optional[str], config.get(ATTR_MQTT, {}).get(ATTR_CLIENT_ID))",
"def get_device_id(self) -> str:\n return Config.get('device_id')",
"def get_router_id(config):\n rid = router_id_cisco_regex.search(config)\n if rid is not None:\n return rid.group('address')\n else:\n return None",
"def read_id():\n try:\n with nfc_open('usb') as tag:\n for record in tag.ndef.records:\n if record.type == 'urn:nfc:wkt:T' and record.text.startswith('spotify:'):\n return SpotifyId.from_string(record.text)\n except NfcError:\n pass\n return None",
"def get_mediatype_id(self, name: str) -> Optional[str]:\n result = self.conn.mediatype.get(filter={'name': name.strip()})\n\n if len(result) < 1:\n raise Exception(f\"No such media for {name} found, check your configuration\")\n elif len(result) > 1:\n raise Exception(f\"Ambiguous media '{name}' found, {len(result)} different medias\")\n\n if result:\n mediatypeid = result[0]['mediatypeid']\n else:\n mediatypeid = None\n\n return mediatypeid",
"def library_id(self):\n info_tag = self.getVideoInfoTag()\n if self.is_episode:\n return jsonrpc.episode_library_id(info_tag.getTVShowTitle(),\n info_tag.getSeason(),\n info_tag.getEpisode())\n elif self.is_movie:\n return jsonrpc.movie_library_id(info_tag.getTitle())\n return None",
"def _GetIdFromInstanceDirStr(instance_dir):\n match = _RE_LOCAL_INSTANCE_ID.match(instance_dir)\n if match:\n return match.group(\"ins_id\")\n\n # To support the device which is not created by acloud.\n if os.path.expanduser(\"~\") in instance_dir:\n return \"1\"\n\n return None",
"def get_video_id() -> str:\n return find_canvas_widget_by_name(\"video id\").get()",
"def the_tvdb_dot_com_id(title):\n pass",
"def _get_numeric_record_id(global_id):\n if re.match(r'[a-zA-Z]{2}\\d+$', str(global_id)) is not None:\n return int(global_id[2:])\n elif re.match(r'\\d+$', str(global_id)) is not None:\n return int(global_id)\n else:\n raise ValueError('{} is not a valid global ID'.format(global_id))",
"def url2id(url):\n params = parse_qs(urlparse(url).query)\n if 'multiverseid' in params:\n return maybeInt(params['multiverseid'][0])\n else:\n return None",
"def configId(self) -> Optional[str]:\n return cast(Optional[str], self._properties.get('configId'))",
"def _get_device_id_from_environment() -> str:\n\n return os.environ[\"GOOGLE_MUSIC_DEVICE_ID\"]",
"def getGUIDByBdcfg(configfile):\n generalDict, projectDict, solutionDict = Engine.readConfiguration(configfile)\n return projectDict['uuid']",
"def get_dataset_id(thing: object) -> t.DatasetId:\n if isinstance(thing, int):\n return t.DatasetId(thing)\n try:\n int_id = int(thing) # type: ignore\n return t.DatasetId(int_id)\n except ValueError:\n raise err.InvalidDatasetError(id=str(thing))",
"def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
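A minimal standalone sketch of the validation flow shown in the get_tvdb_id document above. The TVDB_ID_T pattern (plain digits) is an assumption, since the row does not define it, and the function is lifted out of its class so it can run on its own.

import re
from typing import Any, Optional

# Assumed pattern: digits only (e.g. '79216'), mirroring how TVDB_ID_T is used above.
TVDB_ID_T = re.compile(r"^\d+$")

def validate_tvdb_id(tvdb_id: Any) -> Optional[int]:
    # No ID at all: warn and bail out, as the document does.
    if not tvdb_id:
        print("Warning: No TVDB ID was provided...")
        return None
    # Normalise ints to strings before pattern matching.
    if isinstance(tvdb_id, int):
        tvdb_id = str(tvdb_id)
    # Reject anything that is not a purely numeric string.
    if not isinstance(tvdb_id, str) or not TVDB_ID_T.match(tvdb_id):
        raise ValueError(f"Invalid TVDB ID: {tvdb_id!r}")
    return int(tvdb_id)

# validate_tvdb_id("79216") -> 79216; validate_tvdb_id(1395) -> 1395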
Scrape Title Name and Year (including e.g. 2019) from IMDB | def get_title_name_year(self) -> Tuple[str, str]:
r = self.session.get(f"https://www.imdb.com/title/{self.imdb}")
if r.status_code != 200:
raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]")
imdb_page = html.unescape(r.text)
imdb_title = re.search(
# testing ground: https://regex101.com/r/bEoEDn/1
r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)"
r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>",
imdb_page
)
if not imdb_title:
raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...")
return imdb_title.group("name").strip(), imdb_title.group("year").strip() | [
"def scrape_movie_page(dom):\n # to save the information\n info = []\n\n # find the information block needed\n header = dom.find(\"div\", \"title_wrapper\")\n\n # find the title and strip the string\n name_dom = header.h1.get_text().encode(\"utf-8\")\n name = str(name_dom)[2:-16]\n info.append(name)\n\n # find the year and strip the year\n year_dom = header.h1.span.get_text().encode(\"utf-8\")\n year = str(year_dom)[3:-2]\n info.append(year)\n\n # find the duration and strip the string\n duration_dom = dom.find(\"time\", itemprop=\"duration\").get_text().encode(\"utf-8\")\n duration = str(duration_dom)[28:-23]\n info.append(duration)\n\n # find all the genres and strip the string\n genre_dom = dom.find(\"div\", itemprop=\"genre\").a.get_text().encode(\"utf-8\")\n genre = find_genres(genre_dom, dom)\n info.append(genre)\n\n # find all the directors and strip the string\n director_dom = dom.find(\"span\", itemprop=\"director\").get_text().encode(\"utf-8\")\n director = find_directors(director_dom, dom)\n info.append(director)\n\n # find all the writers and strip the string\n writer_dom = dom.find(\"span\", itemprop=\"creator\").a.get_text().encode(\"utf-8\")\n writer = find_writers(writer_dom, dom)\n info.append(writer)\n\n # find all the actors and strip the string\n actor_dom = dom.find(\"span\", itemprop=\"actors\").a.get_text().encode(\"utf-8\")\n actor = find_actors(actor_dom, dom)\n info.append(actor)\n\n # find the rating and strip the string\n rating_dom = dom.find(\"span\", itemprop=\"ratingValue\").get_text().encode(\"utf-8\")\n rating = str(rating_dom)[2:-1]\n info.append(rating)\n\n # find the number of ratings and strip the string\n number_ratings_dom = dom.find(\"span\", itemprop=\"ratingCount\").get_text().encode(\"utf-8\")\n number_ratings = str(number_ratings_dom)[2:-1]\n info.append(number_ratings)\n\n return info",
"def extract_movies(dom):\n\n # extract data per movie\n movies = dom.find_all('div', class_ = 'lister-item mode-advanced')\n\n # list to store scraped data\n movielist = []\n\n for movie in movies:\n\n # append extracted data to this dict\n moviedict = {}\n\n # scrape titles and add to dict\n moviedict['title'] = movie.h3.a.text\n\n # scrape ratings and add to dict\n moviedict['rating'] = float(movie.strong.text)\n\n # scrape year of release and add to dict\n year = movie.h3.find('span', class_ = 'lister-item-year text-muted unbold')\n moviedict['year'] = re.findall('\\d+', year.text.strip('()'))[0]\n\n # scrape actors and add to dict\n actors = movie.find_all(href=re.compile(\"adv_li_st\"))\n actorlist = []\n for actor in actors:\n actorlist.append(actor.text)\n actorstring = ', '.join(actorlist)\n moviedict['actors'] = actorstring\n\n # scrape runtime and add to dict\n moviedict['runtime'] = movie.p.find('span', class_ = 'runtime').text.split(' ')[0]\n movielist.append(moviedict)\n\n\n # ADD YOUR CODE HERE TO EXTRACT THE ABOVE INFORMATION ABOUT THE\n # HIGHEST RATED MOVIES\n # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT REQUIRED) TO IGNORE\n # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT.\n\n return movielist # REPLACE THIS LINE AS WELL IF APPROPRIATE",
"def parse_imdb_page(html, year):\n soup = BeautifulSoup(html, 'html.parser')\n try:\n for movie in soup.find_all('div', class_='rating rating-list'):\n imdb = movie['id'].split('|')\n id_imdb = imdb[0]\n rating_imdb = imdb[2]\n return {'id_imdb': id_imdb, 'rating_imdb': float(rating_imdb)}\n except (TypeError, AttributeError):\n pass\n return {'id_imdb': '', 'rating_imdb': 0}",
"def find_movie(title, year, format=\"json\"):\n\t# http://www.omdbapi.com/?t=legend&y=2015&plot=short&r=json\n\tt = title.replace(\" \", \"+\")\n\turl = \"http://www.omdbapi.com/?t=\"+t+\"&y=\"+str(year)+\"&plot=full&r=\"+format\n\n\trequest = urllib2.Request(url)\n\tresponse = json.load(urllib2.urlopen(request))\n\tdata = json.dumps(response, indent=2)\n\tprint data\n\treturn data",
"def extract_movie_header(soup: BeautifulSoup) -> Tuple[str, str]:\n\n header = soup.find(\"h3\", class_=\"lister-item-header\")\n\n title = header.a.get_text()\n\n year = header.find(\"span\", class_=\"lister-item-year\").get_text()[-5:-1]\n year = int(year)\n\n return title, year",
"def extract_data_of_single_movie(tag):\r\n link = WEBSITE_URL + tag.h3.a['href']\r\n name = tag.h3.a.string\r\n try:\r\n poster = tag.find('img')['loadlate']\r\n except:\r\n poster = None\r\n try:\r\n genres = tag.p.find('span', class_='genre').string.strip()\r\n except AttributeError:\r\n genres = None\r\n try:\r\n runtime = tag.p.find('span', class_='runtime').string\r\n except AttributeError:\r\n runtime = None\r\n try:\r\n certificate = tag.p.find('span', class_='certificate').string\r\n except AttributeError:\r\n certificate = None\r\n try:\r\n rating = tag.find('strong').string\r\n except AttributeError:\r\n rating = None\r\n try:\r\n year = tag.find('span', class_=\"lister-item-year text-muted unbold\").string[-5:-1]\r\n except TypeError:\r\n year = None\r\n try:\r\n votes = tag.find('p', class_='sort-num_votes-visible').findAll('span')[1].string\r\n except:\r\n votes = 0\r\n try:\r\n director = tag.find('div', class_='lister-item-content').findAll('p')[2].find('a').string\r\n except AttributeError:\r\n director = None\r\n return [name, genres, link, runtime, certificate, rating, year, votes, director, poster]",
"def list_titles(genre):\n text = genre_html(genre)\n num_titles = text.count('title=')\n\n titles = []\n for i in range(num_titles):\n start = text.find('title=')\n end = text[start+7:].find('\">')\n title = text[start+7:start+end]\n titles.append(title)\n text = text[start+7:]\n\n return titles",
"def parse_movie_header(header_line: str):\n match = RE_TITLE.match(header_line)\n if match is None:\n msg = \"Title {} does not match\".format(header_line)\n raise ValueError(msg)\n g = match.groupdict()\n title = g['title']\n year = g['year']\n episode = \"\"\n return title, year, episode",
"def get_year_from_movielist_title(title):\n match = re.match(r'.*\\s+\\((\\d+)\\)', title)\n year = int(match.groups()[0])\n return year",
"def test_scrape_video_title(self):\n url = \"https://m.bilibili.com/video/BV1Qt411T7VS\"\n res = requests.get(url).text\n soup = BeautifulSoup(res, \"lxml\")\n title = rs.extract_video_title(soup)\n self.assertEqual(title.replace(\" \", \"\"), \"影流之主\")",
"def title_after_year(matches: List[str]) -> List[str]:\n years = list(map(get_year, movie_db))\n years.sort()\n end = years[-1]\n m = int(matches[0]) + 1\n r = []\n while m <= end:\n r = r + title_by_year([m])\n m = m + 1\n return r",
"def imdb_id_from_title(title):\n pattern = 'http://www.imdb.com/xml/find?json=1&nr=1&tt=on&q={movie_title}'\n url = pattern.format(movie_title=urllib.quote(title))\n r = requests.get(url)\n res = r.json()\n # sections in descending order or preference\n for section in ['popular','exact','substring']:\n key = 'title_' + section \n if key in res:\n return res[key][0]['id']",
"def get_movie_info(page: str, verbose:bool = True):\n\n def add_scoreInfo(pattern, raw_text, keyName):\n \"\"\"inner helper function to help add score information\n :param pattern: pattern to match\n :param raw_text: html text\n :param keyName: key name to be append to the dict\n \"\"\"\n match_pat = re.search(pattern, raw_text)\n if match_pat is None:\n info[keyName] = None\n else:\n info[keyName] = match_pat.group(1)\n\n info = dict() \n \n # verbose option\n if verbose:\n print('scraping main page')\n print('scraping url: ' + page)\n \n # make soup\n soup = _make_soup(page)\n \n if soup == '':\n return None\n \n else:\n ### extraction ###\n # movie id\n movieId = soup.find('a', href=re.compile('movieId=[0-9]+'))\n if movieId is None:\n info['movie_link'] = None\n else:\n movieId = re.search('movieId=([0-9]+)$', movieId[\"href\"])\n info['movie_link'] = '/m/'+ movieId.group(1)\n \n movieInfo= soup.find('script', type=\"application/ld+json\")\n if movieInfo is None:\n print('No movie information for this movie.')\n else:\n # movie name\n movieName = re.search('\"name\":\"?(.+?)\"?,\"', movieInfo.get_text())\n if movieName is None:\n info['movie_name'] = None\n else:\n info['movie_name'] = movieName.group(1)\n \n # rating\n rating = re.search('\"contentRating\":\"?(.+?)\"?,\"',movieInfo.get_text())\n if rating is None:\n info['rating'] = None\n else:\n info['rating'] = rating.group(1)\n \n # genre \n genre = re.search('\"genre\":\\[\"(.+?)\"\\]', movieInfo.get_text())\n if genre is None:\n info['genre'] = None\n else:\n info['genre'] = genre.group(1).replace('\"','')\n \n # directors\n directors = re.search('\"director\":(.+?),\"author\"', movieInfo.get_text())\n if directors is None:\n info['directors'] = None\n else:\n info['directors'] = ','.join(re.findall('\"name\":\"(.+?)\",\"', directors.group(1)))\n \n # writers\n writers = re.search('\"director\":.+?\"author\":(.+?),\"genre\"', movieInfo.get_text())\n if writers is None:\n info['writers'] = None\n else:\n info['writers'] = ','.join(re.findall('\"name\":\"(.+?)\",\"', writers.group(1)))\n \n # movie synopsis\n movieSyno = soup.find('div', id=re.compile('movieSynopsis'))\n if movieSyno is None:\n info['movie_info'] = None\n else:\n info['movie_info'] = movieSyno.get_text().strip()\n \n # poster_image\n poster_img = soup.find('meta', property = re.compile('image$'))\n if poster_img is None:\n info['poster_image'] = None\n else:\n info['poster_image'] = poster_img[\"content\"]\n \n # cast\n casts = soup.find_all('div', class_=re.compile('^cast-item'))\n if casts is None:\n info['casts'] = None\n else:\n info['casts'] = ','.join([cast.find('span').get_text().strip() for cast in casts])\n \n # in_theaters_date\n in_theaters_date = soup.find('div', text=re.compile(\"In Theaters\"))\n if in_theaters_date is None:\n info['in_theaters_date'] = None\n else:\n info['in_theaters_date'] = in_theaters_date.find_next_sibling('div').find('time').get_text().strip()\n \n # on_streaming_date\n on_streaming_date = soup.find('div', text=re.compile(\"On Disc/Streaming:\"))\n if on_streaming_date is None:\n info['on_streaming_date'] = None\n else:\n info['on_streaming_date'] = on_streaming_date.find_next_sibling('div').find('time').get_text().strip()\n \n # runtime_in_minutes\n runtime_in_minutes = soup.find('div', text=re.compile(\"Runtime:\"))\n if runtime_in_minutes is None:\n info['runtime_in_minutes'] = None\n else:\n info['runtime_in_minutes'] = 
re.search('[0-9]+',runtime_in_minutes.find_next_sibling('div').find('time').get_text().strip()).group(0)\n # studio_name\n studio_name = soup.find('div', text=re.compile(\"Studio:\"))\n if studio_name is None:\n info['studio_name'] = None\n else:\n info['studio_name'] = studio_name.find_next_sibling('div', class_=\"meta-value\").get_text().strip()\n \n # Extra: box office\n box_office = soup.find('div', text=re.compile(\"Box Office:\"))\n if box_office is None:\n info['box_office'] = None\n else:\n info['box_office'] = box_office.find_next_sibling('div', class_=\"meta-value\").get_text().strip()\n \n scoreInfo = soup.find('script', type=\"text/javascript\")\n if scoreInfo is None:\n print('No score information for this movie.')\n else:\n pat_head1 = 'root.RottenTomatoes.context.scoreInfo.+?'\n pat_keywrd = '\"consensus\":'\n pat_tail1 = '\"?(.+?)\"?,\"'\n pat_tail2 = '\"?([0-9]+?)\"?,\"'\n pat_tail3 = '\"?([0-9\\.]+?)\"?,\"'\n # critics_consensus\n criticsCns_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(criticsCns_pat, scoreInfo.get_text(), 'critics_consensus')\n \n # tomatometer_status\n pat_keywrd ='\"tomatometerState\":'\n tmtStatus_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(tmtStatus_pat, scoreInfo.get_text(), 'tomatometer_status')\n\n # tomatometer_rating\n pat_keywrd = '\"score\":'\n tmtRating_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(tmtRating_pat, scoreInfo.get_text(), 'tomatometer_rating')\n\n # tomatometer_count\n pat_keywrd ='\"numberOfReviews\":'\n tmtCnt_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(tmtCnt_pat, scoreInfo.get_text(), 'tomatometer_count')\n \n # audience_status\n audStatus_pat = 'root.RottenTomatoes.context.popcornMeterState.+?\"(.+?)\";'\n add_scoreInfo(audStatus_pat, scoreInfo.get_text(), 'audience_status')\n\n # Extra: audience_want_to_see\n audWantToSee_pat = 'root.RottenTomatoes.context.wantToSeeData.+?\"wantToSeeCount\":' + pat_tail2\n add_scoreInfo(audWantToSee_pat, scoreInfo.get_text(), 'audience_want_to_see_count')\n \n # audience_rating\n pat_keywrd = '\"audienceAll\".+?\"score\":'\n audRating_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audRating_pat, scoreInfo.get_text(), 'audience_rating')\n\n # audience_count\n pat_keywrd = '\"audienceAll\".+?\"ratingCount\":'\n audCnt_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audCnt_pat, scoreInfo.get_text(), 'audience_count')\n\n # audience_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"numberOfReviews\":'\n audTopCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audTopCritics_pat, scoreInfo.get_text(), 'audience_top_critics_count')\n \n # audience_fresh_critics_count\n pat_keywrd = '\"freshCount\":'\n audFreshCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audFreshCritics_pat, scoreInfo.get_text(), 'audience_fresh_critics_count')\n \n # audience_rotten_critics_count\n pat_keywrd = '\"rottenCount\":'\n audRottenCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audRottenCritics_pat, scoreInfo.get_text(), 'audience_rotten_critics_count')\n\n # Extra: audience_fresh_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"freshCount\":'\n audFreshCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audFreshCritics_pat, scoreInfo.get_text(), 'audience_fresh_top_critics_count')\n\n # Extra: audience_rotten_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"rottenCount\":'\n audRottenCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n 
add_scoreInfo(audRottenCritics_pat, scoreInfo.get_text(), 'audience_rotten_rotten_critics_count')\n \n # Extra: tomatometer_avg_rating\n pat_keywrd = '\"avgScore\":'\n tmtAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(tmtAvgRating_pat, scoreInfo.get_text(), 'tomatometer_avg_rating')\n\n # Extra: audience_top_critics_avg_rating\n pat_keywrd = '\"tomatometerTopCritics\".+?\"avgScore\":'\n audTopCriticsAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(audTopCriticsAvgRating_pat, scoreInfo.get_text(), 'audience_top_critics_avg_rating')\n\n # Extra: Score Sentiment\n pat_keywrd = '\"scoreSentiment\":'\n scoreSentiment_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(scoreSentiment_pat, scoreInfo.get_text(), 'score_sentiment')\n\n # Extra: audience_avg_rating\n pat_keywrd = '\"averageRating\":'\n audienceAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(audienceAvgRating_pat, scoreInfo.get_text(), 'audience_avg_rating')\n print('done scraping movie info')\n return info",
"def extract_movies(dom):\n # global movie list that will be returned\n movies_list = []\n\n # find all relevent tags for information about movie\n divs = dom.find_all(\"div\", {\"class\":\"lister-item-content\"})\n\n # distilles out of every div tag relevant information\n for div in divs:\n movie = []\n title = div.h3.a.string\n rating = div.div.div.strong.string\n release = div.h3.find(\"span\", {\"class\" : \"lister-item-year text-muted unbold\"}).string.strip(\"()\")\n\n # makes shure that sequels will be read properly\n if \"I\" in release:\n sequel, release = release.split()\n release = release.strip(\"()\")\n title = title + \" \" + sequel\n\n # put relevant informartion in list as a single movie\n movie.append(title)\n movie.append(rating)\n movie.append(release)\n directors_and_actors = div.find_all(\"p\", {\"class\" : \"\"})[1]\n\n # distilles all actors out of directors_and_actors list\n for actors in directors_and_actors:\n actors_list = []\n if \"Stars\" in actors:\n actors = actors.next.string\n actors_list.append(actors.strip())\n while True:\n actors = actors.next.string\n if not actors == None:\n if actors.strip() == \",\":\n actors = actors.next.string\n actors_list.append(actors.strip())\n else:\n break\n movie.append(actors_list)\n # get runtime\n runtime = div.find(\"p\", {\"class\" : \"text-muted\"}).find(\"span\", {\"class\" : \"runtime\"}).string\n time, min = runtime.split()\n movie.append(int(time))\n\n # fill movie list with movies\n movies_list.append(movie)\n\n return movies_list",
"def get_movie_details(raw_name):\n #Strip dots which are normally used instead of spaces\n replace_pattern = re.compile(r\"\\.\")\n clean_name = replace_pattern.sub(' ', raw_name)\n \n torrent_types = ['1080p', '\\\\d{4}[^p]', '720p', 'DVDRip', 'R5', 'DVDSCR', 'BDRip', '\\\\s+CAM', '\\\\sTS\\\\s', 'PPV']\n name_pattern = re.compile(r'((LiMiTED\\s*)?\\(?\\[?(' + '|'.join(torrent_types) + r')\\)?\\]?)', re.IGNORECASE) \n name_match = name_pattern.split(clean_name)\n if not name_match:\n return None, None\n movie_name = name_match[0]\n #If we didnt take anything out of the name than there wasnt a match\n if movie_name == clean_name:\n return None, None\n \n #Check for p so we dont hit 1080p\n year_pattern = re.compile(r'\\d{4}(?=[^p])')\n #logging.info('Left over: %s', clean_name[len(movie_name):])\n year_match = year_pattern.search(clean_name[len(movie_name):])\n movie_year = None\n if year_match:\n movie_year = int(year_match.group(0).strip())\n \n return movie_name.strip(), movie_year",
"def extract_movies(dom):\n html_data = open(BACKUP_HTML, 'r')\n soup = BeautifulSoup(html_data, 'html.parser')\n title_list = retrieve_title(soup)\n rating_list = retrieve_rating(soup)\n year_list = retrieve_year(soup)\n actor_list = retrieve_actor(soup)\n runtime_list = retrieve_runtime(soup)\n return [title_list, rating_list, year_list, actor_list, runtime_list]",
"def get_coming_soon_movies(date_url_string):\n base_url = \"https://www.imdb.com\"\n url = \"https://www.imdb.com\" + date_url_string\n movies_list = []\n request = requests.get(url)\n if request.status_code == 200:\n soup = BeautifulSoup(request.content, features=\"html5lib\")\n item_lists = soup.find_all(\"div\", {\"class\": \"list_item\"})\n for item in item_lists:\n movie_context = {}\n\n # getting poster image\n poster_image_list = item.find_all(\"img\", {\"class\": \"poster\"})\n if poster_image_list:\n poster_image = poster_image_list[0].get(\"src\")\n movie_context[\"poster_image\"] = str(poster_image).strip()\n\n # Getting movie heading\n movie_heading_list = item.find_all(\"h4\")\n if movie_heading_list:\n movie_heading = str(movie_heading_list[0].text).strip()\n movie_context[\"movie_heading\"] = str(movie_heading).strip()\n\n # Getting movie link\n movie_link_list = item.find_all(\"a\")\n if movie_link_list:\n movie_href = movie_link_list[0].get(\"href\")\n movie_href = str(base_url + movie_href)\n movie_context[\"movie_href\"] = str(movie_href).strip()\n\n\n # Movie time\n movie_time_list = item.find_all(\"time\")\n if movie_time_list:\n time = movie_time_list[0].text\n movie_context[\"movie_time\"] = str(time).strip()\n\n\n # Getting movie types\n movie_type_list = item.find_all(\"p\", {\"class\": \"cert-runtime-genre\"})\n if movie_type_list:\n for movie_type in movie_type_list:\n movie_spans = movie_type.find_all(\"span\")\n list_of_text = [x.text for x in movie_spans]\n types_movie = \" \".join(list_of_text)\n movie_context[\"movie_types\"] = str(types_movie).strip()\n\n\n # Getting movie description\n movie_desc_list = item.find_all(\"div\", {\"class\": \"outline\"})\n if movie_desc_list:\n description = str(movie_desc_list[0].text).strip()\n movie_context[\"description\"] = str(description).strip()\n\n movies_list.append(movie_context)\n\n\n movie_list = [x for x in movies_list if x != {}]\n return {\n \"status\": True,\n \"movie_list\": list(movie_list),\n \"table_headings\": [\"Poster\", \"Name\", \"Time\", \"Types\", \"Description\", \"\"]\n }\n else:\n return {\n \"status\": False,\n \"message\": \"Oops! something went wrong. try again later!\"\n }",
"def get_movies_by_year(year):\n\n out = dict()\n h = httplib.HTTPConnection(SERVER)\n h.request('GET', 'http://'+SERVER+'/year/'+str(year))\n resp = h.getresponse()\n out = resp.read()\n return out",
"def title_details(titleid):\n\n related_titles = []\n # Initially setting details to 'None' to be able to check whether or not the title information should come from the Netflix\n # or IMDB API\n details = None\n\n # Making relevant API calls based on whether or not the title ID is a Netflix ID or IMDB ID\n if not titleid.startswith('tt'):\n # Send a GET request for the details of a specific title using its unique Netflix ID\n details = requests.get(\n url='https://unogsng.p.rapidapi.com/title', \n params={'netflixid': titleid}, \n headers=netflix_headers\n ).json()[\"results\"][0]\n\n # Send a GET request for the country availability related to a specific title using its unique Netflix ID\n countries = requests.get(\n url='https://unogsng.p.rapidapi.com/titlecountries', \n params={'netflixid': titleid}, \n headers=netflix_headers\n ).json()[\"results\"]\n\n # Send a GET request for the genres related to a specific title using its unique Netflix ID\n genres = requests.get(\n url='https://unogsng.p.rapidapi.com/titlegenres', \n params={'netflixid': titleid}, \n headers=netflix_headers\n ).json()[\"results\"]\n\n # Send a GET request for the IMDB ID of the Netflix title\n titleid = requests.get(\n url=\"https://imdb8.p.rapidapi.com/title/find\", \n params={\"q\": details['title']}, \n headers=imdb_headers\n ).json()['results'][0]['id'][7:]\n\n else:\n # Send a GET request for the info of a specific title using its unique IMDB ID\n imdb_title_info = requests.get(\n url=\"https://imdb8.p.rapidapi.com/title/get-overview-details\", \n params={\"tconst\": titleid}, \n headers=imdb_headers\n ).json()\n\n watch_options = requests.get(\n url=\"https://imdb8.p.rapidapi.com/title/get-meta-data\", \n params={\"ids\": titleid}, \n headers=imdb_headers\n ).json()[titleid]['waysToWatch']\n\n # Send a GET request for title ID's related to the searched title\n related_title_ids = requests.get(\n url=\"https://imdb8.p.rapidapi.com/title/get-more-like-this\", \n params={\"tconst\": str(titleid)}, \n headers=imdb_headers\n ).json()\n\n # Send a GET request for a trailer related to the searched title\n video = requests.get(\n url=\"https://imdb8.p.rapidapi.com/title/get-videos\", \n params={\"tconst\": titleid}, \n headers=imdb_headers\n ).json()['resource']\n\n if 'videos' in video:\n video['videos'][0]['id'] = video['videos'][0]['id'][9:]\n\n # Properly formatting the related title ID's , getting their basic info, and appending that info to the 'related_titles'\n # list\n for title in related_title_ids:\n title = title[7:]\n title_info = requests.get(\n url=\"https://imdb8.p.rapidapi.com/title/get-base\", \n params={\"tconst\": title}, \n headers=imdb_headers\n ).json()\n\n title_info['id'] = title_info['id'][7:]\n title_info['id'] = title_info['id'][:-1]\n\n related_titles.append(title_info)\n\n # Rendering the title info page with all needed information variables based on if the title came from the Netflix API or\n # IMDB API\n if details:\n # Send the resulting dictionary to a new page to display the details\n return render_template(\n 'title_details.html', \n details=details, \n countries=countries, \n genres=genres, \n related_titles=related_titles, \n video=video,\n watch_options=watch_options\n )\n else:\n return render_template(\n 'title_details.html', \n related_titles=related_titles, \n title_info=imdb_title_info, \n video=video,\n watch_options=watch_options \n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
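The IMDB <title> regex from the row above can be exercised offline. The sample page-title string below is an assumed example input, not a live IMDB response, and the pattern is copied unchanged from the document.

import re

TITLE_RE = re.compile(
    r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)"
    r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>"
)

sample = "<title>The Office (TV Series 2005–2013) - IMDb</title>"  # assumed example input
m = TITLE_RE.search(sample)
if m:
    # Prints: The Office 2005–2013
    print(m.group("name").strip(), m.group("year").strip())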
Calculate total episode count based on neighbouring same-extension files. | def get_tv_episodes(self) -> int:
return len(glob.glob(os.path.join(
os.path.dirname(self.file),
f"*{os.path.splitext(self.file)[-1]}"
))) | [
"def increase_count_episodes(self):\n return self.sess.run(self.count_episodes_increase)",
"def calculate_how_many_episodes_to_play(self):\n episodes_to_play = self.hyperparameters[\"epsilon_decay_rate_denominator\"] / self.grammar_induction_iteration\n episodes_to_play = max(self.min_num_episodes_to_play, int(max(self.episodes_to_run_with_no_exploration * 2, episodes_to_play)))\n print(\"Grammar iteration {} -- Episodes to play {}\".format(self.grammar_induction_iteration, episodes_to_play))\n return episodes_to_play",
"def return_episode_num(name):\n return int(name.split(\".\")[0].split(\"ep_\")[1]) # Use split to return only the episode number needed to sort the files in increasing order",
"def get_episode_number(self, file_name: str) -> int:\n pattern = re.compile(r\"\\[NeSubs\\] (.+) - .*(\\d+)\")\n match = re.match(pattern, file_name)\n\n return int(match.group(2))",
"def _get_total_games(self) -> int:\n files = get_tfr_filenames(self.config)\n total_games = 0\n for file in files:\n total_games += int(str(file).split('-')[1].split('.')[0])\n return total_games",
"def get_num_episodes(self) -> int:\n return len(self.episodes)",
"def get_count_episodes(self):\n return self.sess.run(self.count_episodes)",
"def ComputeNumEvents(self):\n N = 0\n for name in self.subSeqName:\n N += self.subSequence[name].subEvents\n\n return N",
"def get_number_segments(folder):\n \n number_segments = 0 \n \n files = [join(folder, f) for f in listdir(folder) if isfile(join(folder, f))]\n \n for f in files:\n\n with open(f, \"rb\") as f_opened:\n\n number_segments += len(pickle.load(f_opened))\n \n return number_segments",
"def find_n(self):\n k = len(self.prefix)\n only_files = [f for f in os.listdir(self.dir) if isfile(join(self.dir, f)) and f[:k] == self.prefix]\n return max([-1] + [int(x[k:k + 4]) for x in only_files]) + 1 # files has name with format prefxxxx.h5 - x is a number",
"def get_num_instances_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n file_frames = float(shape[0])\n if self.mode_last_patch == 'discard':\n # the last patch that is always incomplete is discarded\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n elif self.mode_last_patch == 'fill':\n # the last patch that is always incomplete will be filled with zeros or signal, to avoid discarding signal\n # hence we count one more patch\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, 1 + int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n return num_instances_per_file",
"def iter_cnt(filename):\n\n return ancestor_cnt(filename) + 1",
"def fileCounter(directory):",
"def _read_transition_statistics_from_files(model, verbose):\n total_steps = 0\n for site in model.sites:\n for anchor in site.anchors:\n if anchor.md == True and anchor.directory:\n #if verbose: print 'parsing md transitions for:Anchor', milestone.fullname\n #print info['max_steps']\n print('parsing md transitions for:Anchor', anchor.fullname)\n max_steps = anchor._parse_md_transitions()\n print(max_steps, total_steps)\n if max_steps > total_steps:\n total_steps = max_steps\n \n return total_steps",
"def num_episodes(self):\n return len(self._history['episode_length'])",
"def count(self, filename1, filename2):\n\n E.info(\"counting started for %s versus %s\" % (filename1, filename2))\n\n idx2 = self.buildIndex(filename2)\n\n (self.mExons1, self.mExonsOverlapping1,\n self.mBases1, self.mBasesOverlapping1) = self._count(filename1, idx2)\n\n self.mExonsUnique1 = self.mExons1 - self.mExonsOverlapping1\n self.mBasesUnique1 = self.mBases1 - self.mBasesOverlapping1\n\n idx1 = self.buildIndex(filename1)\n\n (self.mExons2, self.mExonsOverlapping2,\n self.mBases2, self.mBasesOverlapping2) = self._count(filename2, idx1)\n\n self.mExonsUnique2 = self.mExons2 - self.mExonsOverlapping2\n self.mBasesUnique2 = self.mBases2 - self.mBasesOverlapping2",
"def get_n_files_total(file_list):\n\n\t# Get the base path of the files\n\ts = file_list[0].split('/')\n\tpath = os.path.join(*s[:-2])\n\t# Initialize n_files with 0\n\tnfiles = 0\n\t# Loop over all files in file_list\n\tfor f in file_list:\n\t\t# Split the filename to retrieve the set number and running number\n\t\tsplitted = f.split(\"/\")[-1].split(\".\")[0].split(\"_\")\n\t\t# Reconstruct the file name of the pickle file that was used to create\n\t\t# the hdf5 file\n\t\tfile_name = \"i3Files_MC_{}_{}.pickle\".format(splitted[3], splitted[-1])\n\t\t# Combine base path and pickle name\n\t\tprep_path = os.path.join(path, \"prep_files\", file_name)\n\t\t# Open the pickle file and retrieve the number of i3files\n\t\twith open(prep_path) as p:\n\t\t\td = pickle.load(p)\n\t\t# Add number of i3files to total number of i3 files\n\t\tnfiles += len(d[\"i3_list\"])-1\n\treturn float(nfiles)",
"def get_num_episodes(self):\n return self.num_evaluation_episodes",
"def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
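A self-contained version of the same glob-based count from the get_tv_episodes row above; the path in the comment is an assumed example layout.

import glob
import os

def count_same_extension(file: str) -> int:
    # e.g. for "/shows/Show.S01/ep1.mkv" this counts every "*.mkv" sitting next to it,
    # which the row above treats as the episode count of the pack.
    ext = os.path.splitext(file)[-1]
    return len(glob.glob(os.path.join(os.path.dirname(file), f"*{ext}")))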
Retrieve the release name based on the file used during MediaInfo. If a season was specified but an episode number was not, it presumes the release is a Pack. Hence, for a pack, it uses the parent folder's name as the release name. | def get_release_name(self) -> str:
if self.season is not None and self.episode is None:
return os.path.basename(os.path.dirname(self.file))
return os.path.splitext(os.path.basename(self.file))[0] | [
"def extract_season(file_name):\n logging.debug(\"Extracting season from {0}\".format(file_name))\n\n season_part = file_name.split(\".\")[0].split(\"_\")[-1]\n season_out = season_part[:2] + \"/\" + season_part[-2:]\n\n return season_out",
"def get_distro_release_name():\n\n release = \"\"\n\n os_release_config = parse_os_release()\n if \"VERSION_CODENAME\" in os_release_config:\n release = os_release_config[\"VERSION_CODENAME\"]\n else:\n logging.debug(\n \"VERSION_CODENAME is not in /etc/os_release. Full file contents: %s\",\n os_release_config,\n )\n\n if release.lstrip() == \"\" or release is None:\n logging.warning(\"No valid release was detected\")\n\n return release",
"def get_current_season_name():\n month_nr = get_current_season()\n return get_season_name(month_nr)",
"def _get_sonic_release(self):\n\n output = self.command(\"sonic-cfggen -y /etc/sonic/sonic_version.yml -v release\")\n if len(output['stdout_lines']) == 0:\n # get release from OS version\n if self.os_version:\n return self.os_version.split('.')[0][0:6]\n return 'none'\n return output[\"stdout_lines\"][0].strip()",
"def get_openstack_release_name():\n openstack_release = os.getcwd().split('/')[-1]\n logging.debug('OpenStack release: {}'.format(openstack_release))\n if openstack_release not in OPENSTACK_RELEASES.keys():\n logging.error('Unable to determine OpenStack release name '\n 'from spec directory')\n return None\n else:\n return openstack_release",
"def getApplicationReleaseName(self) -> unicode:\n ...",
"def season(self):\n if self.game_id[3] == \"9\":\n return \"19\" + self.game_id[3] + self.game_id[4]\n else:\n return \"20\" + self.game_id[3] + self.game_id[4]",
"def media_season(self):\n return self._item.get(\"season\")",
"def project_name(self):\n if 'v59' in self.data['article']:\n return self.data['article']['v59'][0]['_']",
"def get_season_name(season_nr):\n season = Season.query.filter_by(id=season_nr).first()\n return season.name",
"def to_release_brach_name(self) -> str:\n return f\"release/{self.major}.{self.minor}\"",
"def get_season_number(file):\n\tmedia_info = MediaInfo.parse(file)\n\tfor track in media_info.tracks:\n\t\tif track.track_type == 'General':\n\t\t\tsynopsis = track.synopsis\n\n\t\t\t\"\"\"We assume that there won't be more than 99 episodes \n\t\t\tin a season here, so just trim the last two characters\n\t\t\tand what remains must be our season number. There has\n\t\t\tto be a smarter way.\"\"\"\n\t\t\tseason_num = synopsis[:-2]\n\t\t\treturn int(season_num)",
"def getReleaseVersion(self, workingTowerName, infixStream):\n towerInfix = iccs_apex.whatInfixIsStream(workingTowerName)\n prefixStream, postfixStream = string.split(workingTowerName, towerInfix)\n releaseVersion, postVersion = string.split(postfixStream, \"wrk\")\n releaseTowerName = infixStream + releaseVersion + \"rel\"\n \n return releaseTowerName",
"def season_folder(cls, season):\r\n\r\n\t\t'''# Google Drive downloads replace these characters automatically\r\n\t\t# I'm implementing this in the code as well for convenience\r\n\t\tseason = season.replace(\"&\", \"_\")\r\n\t\tseason = season.replace(\"'\", \"_\")'''\r\n\r\n\t\t# Folder names are ANSI versions of the season name\r\n\t\t# This is important in names like \"Lé Unicorn\" which get\r\n\t\t# converted incorrectly as folder names\r\n\t\tseason = season.encode(encoding=\"utf-8\")\r\n\t\tseason = season.decode(encoding=\"cp1252\", errors=\"ignore\")\r\n\r\n\t\treturn season",
"def _get_archive_name(info):\n match = re.search(r\"^NAME=(?P<name>.+)$\", info, re.MULTILINE)\n return match.group(\"name\") if match else None",
"def parse_season(filename):\n print_info('Attempting to parse {0}'.format(filename))\n print_info('Extracting season from {0}'.format(filename))\n for regex in SEASON_REGEX:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_season = m.group('Season').lower()\n print_info('Extracted season: {0}'.format(extracted_season))\n\n season_num = int(extracted_season)\n if season_num is not None and season_num > 0:\n print_info('Season might be: {0}'.format(season_num))\n return 'S' + format_num(season_num)\n return 'S01'",
"def getName(self):\n if self.torrentType == TorrentType.Serie:\n if self.isCompleteSeason:\n return \"{0} Season {1}\".format(self.name, self.season)\n else:\n return \"{0} S{1}E{2}\".format(self.name, self.season, self.episode)\n else:\n return self.name",
"def season(self):\n # In lieu of a Python enum type:\n return str(((int(self.published.strftime(\"%m\")) - 1) / 3) % 4)",
"def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
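The pack-versus-single-file decision from the get_release_name row can be reproduced without the surrounding class; season and episode are plain arguments standing in for the object attributes, and the example path is an assumption.

import os
from typing import Optional

def release_name(file: str, season: Optional[int], episode: Optional[int]) -> str:
    # Season set but no episode: treat as a season pack and use the folder name.
    if season is not None and episode is None:
        return os.path.basename(os.path.dirname(file))
    # Otherwise the file name itself (minus extension) is the release name.
    return os.path.splitext(os.path.basename(file))[0]

# release_name("/downloads/Show.S01.1080p/Show.S01E01.mkv", season=1, episode=None)
#   -> "Show.S01.1080p"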
Get a wide banner image from fanart.tv. Currently restricts banners to English only. | def get_banner_image(self, tvdb_id: int) -> Optional[str]:
if not tvdb_id:
return None
if not self.fanart_api_key:
raise ValueError("Need Fanart.tv api key for TV titles!")
r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}")
if r.status_code == 404:
return None
res = r.json()
error = res.get("error message")
if error:
if error == "Not found":
return None
raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}")
banner = next((
x["url"] for x in (res.get("tvbanner") or [])
if x["lang"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language
), None)
return banner | [
"def render_banner(self, width=300, height=85):\n img_path = IMG_PATH + os.sep + CARD_BANNER\n banner_img = Image.open(img_path)\n banner_img = banner_img.resize((width, height))\n return banner_img",
"def banner_img(self, instance):\n if instance.banner:\n url = instance.banner.url\n return format_html(f'<a href=\"{url}\"><img src=\"{url}\" style=\"max-width:160px;max-height:120px\"/></a>')\n\n return None",
"def banner_wrapper(banner_url):\n # so simple\n return '{url}<img src=\"{url}\" alt=\"{alt}\">'.format(\n url=banner_url,\n alt='Banner'\n )",
"def getBanner(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('GET', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception', 404: 'Specified banner does not exist'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\tif payload is not None:\n\t\t\tdata = json.loads(payload)\n\t\t\tpayload= data.get('banner')\n\t\treturn deserialize_Banner_json(payload)",
"def banner(self, *args, **kwargs) -> Banner:\n return self._retrieve_singular(self.banners, *args, **kwargs)",
"def get_bing_image_url():\n # TODO figure out what other locales Bing accepts\n locale = \"en-US\"\n url = f\"https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mbl=1&mkt={locale}\"\n req = urllib.request.Request(url=url)\n with urllib.request.urlopen(req) as resp:\n resp = json.loads(resp.read())\n url = \"https://bing.com\" + resp[\"images\"][0][\"url\"]\n return url",
"def _get_banner(self, btype):\n banner = []\n for r in self.tvdb._db.query(type='banner', parent=self._dbrow, btype=btype):\n entry = r.get('data')\n for key, value in entry.items():\n if key.lower().endswith('path'):\n entry[key] = self.tvdb.hostname + '/banners/' + str(value)\n entry.pop('BannerType')\n entry.pop('id')\n i = core.Image()\n i.url = entry['BannerPath']\n i.thumbnail = entry.get('ThumbnailPath', i.url)\n i.data = entry\n banner.append(i)\n banner.sort(lambda x,y: -cmp(float(x.data.get('Rating', 0)), float(y.data.get('Rating', 0))))\n return banner",
"def getBanner(outputScan):\n try:\n return str(outputScan.split(\", Banner: \", 1)[1][:12])\n #banner = re.search(r\"[0-9A-F]{12}\",outputScan, re.MULTILINE).group()\n #return str(banner)\n except Exception as e:\n print '\\033[91m'+\"ERROR_BANNER\"\n return \"BANNER_ERROR\"",
"def show_banner():\n from x84.bbs import showart, echo, getterminal\n import os\n\n artfile = os.path.join(os.path.dirname(__file__), 'art', 'main.ans')\n\n # displays a centered main menu header in topaz encoding for utf8\n for line in showart(artfile, 'topaz', center=True):\n echo(line)",
"def choose_banner(banners):\n # simple random\n n = random.randint(0, len(banners)-1)\n return banners[n]",
"def banner(self):\n from sage.misc.banner import banner_text\n return banner_text()",
"def get_banner(conn) -> str:\n banner_data = conn.recv(1024)\n banner = banner_data.decode().strip()\n print('Banner: {}'.format(banner))\n return banner",
"def banner(self):\n return self._banner",
"def draw_banners(self):\n\n # Banner dims\n BWIDTH, BHEIGHT = 400, 100\n BMIDDLE = BHEIGHT//2\n MID_PADDING = 10\n ROWS = math.ceil(len(self.banners)/2)\n IMGW = BWIDTH*2+MID_PADDING\n IMGH = ROWS * (BHEIGHT + MID_PADDING) - MID_PADDING\n\n # Create new canvas\n background = Image.new(\n mode=\"RGBA\",\n size=(IMGW, IMGH),\n color=self.card_background\n )\n font = ImageFont.truetype(f\"assets{sep}Roboto.ttf\", 30)\n\n for i, name in enumerate(self.banners):\n row = i // 2 * BHEIGHT + MID_PADDING * (i//2) # 00112233\n col = i % 2 * BWIDTH # 01010101\n\n if i % 2 == 1:\n col += MID_PADDING\n\n if i // 2 != 0:\n row + MID_PADDING\n\n banner = Image.open(f\"assets{sep}banners{sep}{name}.png\")\n banner = banner.resize((BWIDTH, BHEIGHT))\n d = ImageDraw.Draw(banner)\n\n _, texty = font.getsize(name)\n text_offset = (BWIDTH//10, BMIDDLE - texty//2)\n d.text(text_offset, name, font=font)\n\n background.paste(banner, (col, row))\n return background",
"async def banner(ctx, *, guild=None):\r\n if guild is None:\r\n guild = ctx.guild\r\n elif type(guild) == int:\r\n guild = discord.utils.get(ctx.bot.guilds, id=guild)\r\n elif type(guild) == str:\r\n guild = discord.utils.get(ctx.bot.guilds, name=guild)\r\n banner = await guild.banner_url_as(format=\"png\").read()\r\n with io.BytesIO(banner) as f:\r\n await ctx.send(file=discord.File(f, \"banner.png\"))",
"def getImage(cardTitle, size=\"normal\"):\n page = requests.get(\"https://api.scryfall.com/cards/named?exact=\"+name)\n page_json = json.loads(page.content)\n image_link = page_json[\"image_uris\"][size]\n image_response = requests.get(image_link)\n img = Image.open(BytesIO(image_response.content))\n return img.resize((384, 535)).convert(\"1\")",
"def download_banner(self, banner_path):\n serie = self._root.find('Series')\n banner = unicode(serie.find('banner').text)\n if banner != '' and not os.path.isfile(banner_path):\n urllib.urlretrieve(self.URL_BANNER + banner, banner_path)",
"def get_url_for_min_resolution(self, min_height, min_width, image):",
"def banner(name):\n print \"#\"\n print \"# {0}\".format(name.encode('utf-8'))\n print \"#\"\n return name"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
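A trimmed-down sketch of the same fanart.tv lookup shown in the get_banner_image row. Unlike the original, it simply prefers "en" banners rather than matching the primary audio track's language, it collapses all API errors to None, and the api_key value is a caller-supplied placeholder. The endpoint and response keys follow the document above.

import requests
from typing import Optional

def get_tv_banner(tvdb_id: int, api_key: str) -> Optional[str]:
    r = requests.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={api_key}")
    if r.status_code == 404:
        return None
    res = r.json()
    if res.get("error message"):
        # Simplified: the row above distinguishes "Not found" from unexpected errors.
        return None
    # Prefer an English banner; the original matches the first audio language instead.
    return next((x["url"] for x in (res.get("tvbanner") or []) if x["lang"] == "en"), None)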
Return a list of a brief subtitle overview per subtitle. e.g. English, Forced, SubRip (SRT); English, SubRip (SRT); English, SDH, SubRip (SRT); Spanish, Latin American (SDH), SubRip (SRT). The bit of text between the Language and the Subtitle format is the Track Title. It can be of any format, but it is recommended to be used as shown above. It will be returned as a list of strings with the `- ` already prepended to each entry. | def get_subtitle_print(subs: List[Track]) -> List[str]:
data = []
if not subs:
data.append("--")
for sub in subs:
line_items = []
# following sub.title tree checks and supports three different language and title scenarios
# The second scenario is the recommended option to choose if you are open to choosing any
# The third scenario should be used if you have nothing unique to state about the track
# | Language | Track Title | Output |
# | ------------ | ----------------------------- | --------------------------------------------- |
# | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |
# | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) |
# | es / Spanish | None | - Spanish, SubRip (SRT) |
language = pycountry.languages.get(alpha_2=sub.language).name
if sub.title:
if language.lower() in sub.title.lower():
line_items.append(sub.title)
else:
line_items.append(f"{language}, {sub.title}")
else:
line_items.append(language)
line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)"))
line = "- " + ", ".join(line_items)
data += [
(" " + x if i > 0 else x)
for i, x in enumerate(textwrap.wrap(line, 64))
]
return data | [
"def video_get_title_description(self):\r\n return track_description_list(libvlc_video_get_title_description(self))",
"def get_title(self):\n return [i['title'] for i in self]",
"def video_get_chapter_description(self, title):\r\n return track_description_list(libvlc_video_get_chapter_description(self, title))",
"def subtitle(self,) -> str:\n return self.__data['Description']",
"def getURLSubtitle(url = \"http://www.davedraper.com/pmwiki/pmwiki.php/Main/Main\"):\n soup = BeautifulSoup(urllib.request.urlopen(url).read())\n content = soup.find('div',attrs = {'id' : 'wikitext'})\n subtitle = content.select('p > strong')\n subtitle_list = [str(subt1.next) for subt1 in subtitle]\n return subtitle_list",
"def getatitle(allcontent, corpus):\n for i in range(0, len(allcontent)):\n words = re.split(r'\\s+', allcontent[i])\n if words[0] == \"Title\":\n for j in range(2, len(words)):\n if len(processword(words[j])) > 0:\n corpus.append(processword(words[j]))",
"def SubTitle(Text):\n pass",
"def subsubjects(self,) -> typing.List[str]:\n return [topic_data['Title'] for topic_data in self.__data['ConnectedSubTopics']]",
"def titles():\n return [row['title'] for row in\n UnreviewedReadout(MockRequest()).rows()]",
"def get_all_titles(document: dict) -> str:\n return \" \".join([chapter[\"section\"] for chapter in document[\"body_text\"]] + [get_title(document)])",
"def redditTitle(sub, limit =1):\n for text in reddit.subreddit(sub).top('day', limit=limit):\n title = text.title\n return title",
"def mush_title(title):\n words = title.split(\" \")\n mushed_title = \"\"\n for word in words:\n mushed_title += word\n return [mushed_title]",
"def book_title(title):\n # this will capitalize the first letter of every word\n title = title.title()\n pre_title = []\n pre_title = title.split(\" \")\n new_title = \"\"\n for word in pre_title:\n # If the word is the first word of the title it has to be capitalize\n if word != pre_title[0]:\n # If the word is in the small word list make it lower case\n if word.lower() in small_words:\n word = word.lower()\n new_title = new_title + word + ' '\n# Remove the lagging space \n return new_title.strip()",
"def text_title(filename):\n lines = pdf_text(filename).strip().split('\\n')\n\n i = title_start(lines)\n j = title_end(lines, i)\n\n return ' '.join(line.strip() for line in lines[i:j])",
"def list_titles(genre):\n text = genre_html(genre)\n num_titles = text.count('title=')\n\n titles = []\n for i in range(num_titles):\n start = text.find('title=')\n end = text[start+7:].find('\">')\n title = text[start+7:start+end]\n titles.append(title)\n text = text[start+7:]\n\n return titles",
"def extract_titles(self, preprocessed_input):\n titles = []\n\n if (preprocessed_input.find(\"\\\"\") != -1):\n\n # if the doc contains quotations\n start = preprocessed_input.find(\"\\\"\")\n while start != -1:\n end = preprocessed_input.find(\"\\\"\", start+1)\n title = preprocessed_input[start+1:end]\n titles.append(str(title))\n start = preprocessed_input.find(\"\\\"\", end+1)\n\n else :\n\n # if doc does not contain quotations\n feelingwords = [\"think\", \"thought\", \"felt that\", \"enjoy\", \"enjoyed\", \"like\", \"hate\", \"hated\"]\n endwords = [\"was\", \"is\", \"has\", \"\\.\", \"\\!\", \"\\,\"]\n for word in feelingwords:\n firstletter = preprocessed_input.find(word)\n if firstletter != -1:\n start = firstletter + len(word)\n for endW in endwords :\n end = preprocessed_input.find(endW)\n if end != -1:\n title = preprocessed_input[start+1: end-1]\n titles.append(str(title.lower()))\n\n s = self.convert_input(preprocessed_input)\n while s.find(\"/\") != -1:\n films = list(self.t.prefixes(s))\n\n if len(films):\n for m in films:\n # print(\"MOVIE\", m[1])\n titles.append(m[1])\n s = s[s.find(\"/\")+1:]\n # print(s, films)\n return titles",
"def subtitle(self, txt):\n num = len(txt)\n ticks = \"-\" * num\n print(txt)\n print(ticks)",
"def extract_subtitle_track(path_to_mkv):\n handler = SubtitleHandler()\n with open(path_to_mkv, \"rb\") as fp:\n mkvparse.mkvparse(fp, handler)\n\n return handler.subs",
"def extract_titles(p, finp):\n\n pat2 = re.compile(r'<li>(.+?)</li>')\n titles = []\n with open(p+ \"/\" + finp, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n s = pat2.search(line)\n if s:\n firstchar = s.group(1)[0]\n if firstchar != \"M\" and (firstchar.isupper() or firstchar.isdigit()):\n # print(s.group(1))\n titles.append(s.group(1))\n\n # print(\"\\nPDF:\", finp, \",CÍMEK SZÁMA: \", len(titles))\n # print(\"\\n###########################################\\n\")\n return titles"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
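The language/title merge in the get_subtitle_print row can be shown with plain tuples instead of MediaInfo Track objects and pycountry lookups; the sample data in the trailing comment is assumed, and the 64-column wrap width is carried over from the document.

import textwrap
from typing import List, Optional, Tuple

def subtitle_lines(subs: List[Tuple[str, Optional[str], str]]) -> List[str]:
    # Each entry: (language name, optional track title, subtitle format).
    data = []
    for language, title, fmt in subs:
        if title and language.lower() in title.lower():
            label = title                      # title already names the language
        elif title:
            label = f"{language}, {title}"     # prepend the language to the title
        else:
            label = language                   # nothing unique to say about the track
        line = "- " + ", ".join([label, fmt.replace("UTF-8", "SubRip (SRT)")])
        # Indent wrapped continuation lines, as the document does.
        data += [("  " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(line, 64))]
    return data or ["--"]

# subtitle_lines([("Spanish", "Latin American (SDH)", "UTF-8")])
#   -> ["- Spanish, Latin American (SDH), SubRip (SRT)"]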
The mins method returns the lower bounds of the action spaces' parameters. | def mins(self) -> Tensor:
return self._ranges[:, 0] | [
"def mins(self):\n return self.intervals[:, 0]",
"def _rrv_minmax_ ( s ) :\n return s.getMin(),s.getMax()",
"def argmin(self) -> int:\n return self.actuator_values.index(self.min)",
"def minmin_maxmax( *args ):\n rmin = min( [ mv.min() for mv in args ] )\n rmax = max( [ mv.max() for mv in args ] )\n rmv = cdms2.createVariable( [rmin,rmax] )\n return rmv",
"def get_params_lower_bound():\n return [x for x, y in param_value_range]",
"def __get_mins_and_maxs__(self, KNN_A):\n number_of_points = len(KNN_A)\n number_of_dimensions = len(KNN_A[0])\n\n mins = [1e10] * number_of_dimensions\n maxs = [-1e10] * number_of_dimensions\n\n for i in range(number_of_points):\n for j in range(number_of_dimensions):\n if KNN_A[i][j] < mins[j]:\n mins[j] = KNN_A[i][j]\n if KNN_A[i][j] > maxs[j]:\n maxs[j] = KNN_A[i][j]\n\n return mins, maxs",
"def calculate_min_max_tiles(self):",
"def get_parameters_min(self):\n minValues = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n minValues[i] = p.get_min_value()\n i += 1\n return minValues",
"def getMinMaxTuple(self):\n return tuple(self.regionMin+self.regionMax)",
"def get_minmax(self, stmt, slist):\n minel = maxel = None\n for s in slist:\n if s.keyword == \"min-elements\":\n minel = s.arg\n elif s.keyword == \"max-elements\":\n maxel = s.arg\n if minel is None:\n minst = stmt.search_one(\"min_elements\")\n if minst:\n minel = minst.arg\n else:\n minel = \"0\"\n if maxel is None:\n maxst = stmt.search_one(\"max_elements\")\n if maxst:\n maxel = maxst.arg\n return (minel, maxel)",
"def fitness_mins(self):\n return [min(f) for f in self.data[\"fitness\"]]",
"def test_minefield_params(self):\n tminefield = Minefield(10, 10)\n print(Minefield.min",
"def potential_min(self):\n\n return self._args.min",
"def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds",
"def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)",
"def lower_bounds(self, *args):\n def sa(a, b=self._cplex.variables.get_num() - 1):\n return unzip(CPX_PROC.boundsa_lower(self._env._e, self._cplex._lp, a, b))\n return apply_freeform_two_args(\n sa, self._cplex.variables._conv, args)",
"def return_extents(self):\n\n return [qm.tree.mins, qm.tree.maxs]",
"def __positionMinConstraint(self, x, y):\n if self.__filterReentrant.locked():\n # Ignore the constraint when we set an explicit value\n return x, y\n vmax = self.getMax()\n if vmax is None:\n return x, y\n return min(x, vmax), y",
"def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The maxs method returns the upper bounds of the action spaces' parameters. | def maxs(self) -> Tensor:
return self._ranges[:, 1] | [
"def maxs(self):\n return self._maxs",
"def get_parameters_max(self):\n maxValues = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n maxValues[i] = p.get_max_value()\n i += 1\n return maxValues",
"def get_params_upper_bound():\n return [y for x, y in param_value_range]",
"def maxQ(self,state):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n \r\n for a in self.actions:\r\n q = self.Q(state,a)\r\n #print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)",
"def maxs(self):\n return self.intervals[:, 1]",
"def max_grains(self):\n index = self._ordered_input_names.index('max_grains')\n return self._inputs[index]",
"def maxQ(self,state):\r\n maxA = 0\r\n maxQ = float(\"-inf\")\r\n for aCurr in self.actions:\r\n qCurr = self.Q[(state,aCurr)]\r\n if qCurr > maxQ:\r\n maxA = aCurr\r\n maxQ = qCurr \r\n return(maxQ,maxA)",
"def argmax(self) -> int:\n return self.actuator_values.index(self.max)",
"def max_n(self, state, player):\n if state.cutoff_test():\n end = self.utility(state)\n ##print(end)\n return (end, None)\n\n v_max = np.full(3, float(-10000))\n best_a = None\n\n for action in available_actions(state):\n (v, irrelevant) = self.max_n(state.result(player, action), next_turn(player))\n if v[_PLAYERS[player]] > v_max[_PLAYERS[player]]:\n v_max = v\n best_a = action\n\n ##if best_a is not None and best_a[0] == EXIT:\n ##print(v_max)\n #print(best_a)\n return (v_max, best_a)",
"def max(self) -> int:",
"def get_max(self):",
"def get_upper_bounds(self, *args):\n def getub(a, b=self.get_num() - 1):\n return CPX_PROC.getub(self._env._e, self._cplex._lp, a, b)\n return apply_freeform_two_args(getub, self._conv, args)",
"def max_overs(self):\n return self._max_overs",
"def getBetaMinMax(self):\t\n\t\treturn (self.getParam(\"beta_min\"),self.getParam(\"beta_max\")\t)",
"def maxQ(self,feat):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n for a in self.actions:\r\n q = self.Q(feat,a)\r\n print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)",
"def fitness_maxs(self):\n return [max(f) for f in self.data[\"fitness\"]]",
"def max_values(self, lower, upper):\n if not self.upper_bounds is None:\n return self.upper_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.upper_bounds = plus.dot(upper) + minus.dot(lower) + self.const\n \n return self.upper_bounds",
"def upper_bounds(self, *args):\n def sa(a, b=self._cplex.variables.get_num() - 1):\n return unzip(CPX_PROC.boundsa_upper(self._env._e, self._cplex._lp, a, b))\n return apply_freeform_two_args(\n sa, self._cplex.variables._conv, args)",
"def maxBound(self, dims):\n return 16000"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The _generate_iterator method creates an iterator which runs over all possible parameter combinations | def _generate_iterator(self) -> Iterable:
params: List[Tensor] = []
for angle_range in self._ranges:
lin_space: Tensor = linspace(angle_range[0], angle_range[1], steps=self._num_steps)
params.append(lin_space)
power: int
dims: int
for i in range(0, self._num_params):
power = len(self._ranges) - 1 - i
dims = i
params[i] = params[i].repeat_interleave(self._num_steps ** power)
params[i] = params[i].broadcast_to((self._num_steps ** dims, self._num_steps ** (power + 1))).flatten()
return zip(*params) | [
"def __iter__(self):\n yield from generator(*self.args, **self.kwargs)",
"def generate_parameter_combinations(params_def):\n param_lists = []\n for param_name, param_def in iter(params_def.items()):\n param_values = generate_values_for_param(param_def)\n param_lists.append([(param_name, param_value) for param_value in param_values])\n\n for param_combination in itertools.product(*param_lists):\n yield {param[0]: param[1] for param in param_combination}",
"def __iter__(self):\n leaf_paths, leaf_vals = self._find_combinatorial_leaves()\n return self._combinations_generator(leaf_paths, leaf_vals)",
"def __iter__(self):\n for name in self.valid_params:\n yield name",
"def __http_requests_generator(request_template, parameters):\n for payload in itertools.product(*parameters):\n yield request_template.format(*payload), payload",
"def _build_iter(self):\n raise NotImplementedError()",
"def parameters(self):\n for parameters in self:\n for parameter in parameters:\n yield parameter",
"def generate_assignment(parameters):\n if len(parameters) == 0:\n yield []\n raise StopIteration()\n cp_pars = copy.deepcopy(parameters)\n par, values = cp_pars.popitem()\n for val in values:\n for r in generate_assignment(cp_pars):\n yield r + [(par,val)]",
"def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def __iter__(self):\n return iproduct(*self.sets)",
"def parameter_combinations(cls):\n return cls.generate_parameter_combinations({})",
"def __iter__(self):\n for o in self._iter:\n yield o",
"def __iter__(self) -> Iterator[Tuple[int, int, Dict]]:\n for job_config in product(\n self._keyspace[\"input_draw\"], self._keyspace[\"random_seed\"], self.branches\n ):\n if job_config[2] is None:\n job_config[2] = {}\n yield job_config",
"def iterparams(params: Dict[str, List[Any]]) -> Dict[str, Any]:\n for set in product(*params.values()):\n yield dotdict(zip(params.keys(), set))",
"def _generate_combinations(self, param_idx, params):\n\n if param_idx == len(self.grid) - 1:\n # last parameter, just return list of values for this parameter\n return [[value] for value in self.grid[params[param_idx]]]\n else:\n subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations\n result = []\n\n # iterate over all values of current parameter\n for value in self.grid[params[param_idx]]:\n for subcombination in subcombinations:\n result.append([value] + subcombination)\n\n return result",
"def __iter__(self):\n return self.new_generator()",
"def test_hyperparameter_iterator():\n algorithm_component = _algorithm_component(\n hyperparameters=MOCK_HYPERPARAMS)\n hyperparam_settings = list(algorithm_component.hyperparameter_iterator())\n hnames = [\n \"TestComponent__hyperparameter1\",\n \"TestComponent__hyperparameter2\"]\n for settings in (dict(zip(hnames, s)) for s in\n product([1, 2, 3], [\"a\", \"b\", \"c\"])):\n assert settings in hyperparam_settings",
"def g_iter(n):\n\tpass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to rotate one vector to another, inspired by vrrotvec.m in MATLAB | def vrrotvec(a,b):
a = normalize(a)
b = normalize(b)
ax = normalize(np.cross(a,b))
angle = np.arccos(np.minimum(np.dot(a,b),[1]))
if not np.any(ax):
absa = np.abs(a)
mind = np.argmin(absa)
c = np.zeros((1,3))
c[mind] = 0
ax = normalize(np.cross(a,c))
r = np.concatenate((ax,angle))
return r | [
"def create_rotvec(vec1, vec2):\n angle = vec_ang(vec1, vec2)\n vec3 = np.cross(vec1, vec2)\n vec3 *= angle / vec_norm(*vec3)\n return vec3",
"def rotateVector(rot, v):\r\n print 'rotateVector:', 'heading', rot[1], 'attitude', rot[2], 'bank', rot[0]\r\n print 'input', v\r\n if rot[1]: v = matmult([v], rot_Y(rot[1]))[0]\r\n print ' in y', v\r\n if rot[2]: v = matmult([v], rot_Z(rot[2]))[0]\r\n print ' in z', v\r\n if rot[0]: v = matmult([v], rot_X(rot[0]))[0]\r\n print ' in x', v\r\n return v",
"def transformFromRotVecInPlace(*args):\n return _almathswig.transformFromRotVecInPlace(*args)",
"def svecRotate(v, T):\n \n return svec(Rotate(smat(v), T))",
"def rotate_vectors(q, vec):\n rot_vec = []\n for i, v in enumerate(vec):\n rot_vec.append(q.rotate(v))\n return rot_vec",
"def rotate_mueller(pol_vec, rotation):\n\n return np.dot(pol_vec, rotation)",
"def cartesian_rotation(vector, x_angle, y_angle, z_angle):\n R_vector = np.matmul(\n R_z(z_angle), np.matmul(\n R_y(y_angle), np.matmul(\n R_x(x_angle), vector\n )\n )\n )\n return R_vector",
"def vec_rotate_right(x):\n return jnp.roll(x, 1)",
"def rotate(vector, angle):\n return np.cos(angle) * vector[0] + np.sin(angle) * vector[1], \\\n -np.sin(angle) * vector[0] + np.cos(angle) * vector[1]",
"def rotate(vector: np.ndarray,\n angle: float,\n origin: np.ndarray) -> np.ndarray:\n v1 = vector - origin\n \n x = v1[0]\n y = v1[1]\n x1 = x * np.cos(angle) - y * np.sin(angle)\n y1 = x * np.sin(angle) + y * np.sin(angle)\n return np.array([x1, y1]) + origin",
"def rotate(vec,axis,angle):\n theta = angle/180. *np.pi\n n = np.array(axis)\n n = n/np.linalg.norm(n)\n newvec = np.zeros(3,dtype=float)\n newvec = n *np.dot(n,vec) \\\n +(vec- n*np.dot(n,vec))*np.cos(theta) \\\n +np.cross(vec,n)*np.sin(theta)\n return newvec",
"def rotate_vector(v, angle):\n rangle = radians(angle)\n rotmat = np.array([[cos(rangle), -sin(rangle)],\n [sin(rangle), cos(rangle)]])\n return rotmat.dot(v)",
"def rotate(initial_vector, rotated_vector, other_vectors):\n\n init_vec_norm = normalize(initial_vector)\n rot_vec_norm = normalize(np.asarray(rotated_vector))\n middle_vec_norm = normalize(init_vec_norm + rot_vec_norm)\n first_reflector = init_vec_norm - middle_vec_norm\n second_reflector = middle_vec_norm - rot_vec_norm\n Q1 = householder(first_reflector)\n Q2 = householder(second_reflector)\n reflection_matrix = np.matmul(Q2, Q1)\n rotated_vectors = np.matmul(other_vectors, np.transpose(reflection_matrix))\n return rotated_vectors",
"def find_relative_vector_rotation(a, b):\n\n a = a / np.linalg.norm(a)\n b = b / np.linalg.norm(b)\n if np.linalg.norm(a - b) < 0.001:\n return np.eye(3)\n\n v = np.cross(a, b)\n c = np.dot(a, b)\n s = np.linalg.norm(v)\n Im = np.identity(3)\n vXStr = '{} {} {}; {} {} {}; {} {} {}'.format(0, -v[2], v[1], v[2], 0, -v[0], -v[1], v[0], 0)\n k = np.array(np.matrix(vXStr))\n R = Im + k + np.matmul(k, k) * ((1 - c)/(s**2))\n return R",
"def toVector(self, *args):\n return _almathswig.Rotation_toVector(self, *args)",
"def rotate (vect, angle, axis):\n\n cosine = np.cos (angle)\n sine = np.sin (angle)\n\n return (vect * cosine + \\\n sine * np.cross (axis, vect) + \\\n np.dot (axis, vect) * (1 - cosine) * axis)",
"def RotateVector(rotation, vector):\n return Vector(\n rotation.rot[0][0]*vector.x + rotation.rot[1][0]*vector.y + rotation.rot[2][0]*vector.z,\n rotation.rot[0][1]*vector.x + rotation.rot[1][1]*vector.y + rotation.rot[2][1]*vector.z,\n rotation.rot[0][2]*vector.x + rotation.rot[1][2]*vector.y + rotation.rot[2][2]*vector.z,\n vector.t\n )",
"def rotate(self, points, rot_vec):\n theta = np.linalg.norm(rot_vec)\n with np.errstate(invalid='ignore'):\n v = rot_vec / theta\n v = np.nan_to_num(v)\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n\n return cos_theta * points + sin_theta * np.cross(v, points) + (points.dot(v.T) * (1 - cos_theta)).dot(v)",
"def quat_rot_vec(quat,vec):\n vec = np.array(vec)\n quat = np.array(quat)\n qvec = np.zeros(4)\n qvec[1:4] = vec\n return quat_mult(quat_inv(quat),quat_mult(qvec, quat))[1:4]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sort the buses reversed by their period, having tagged them with their position in the sequence, which is their c value. >>> list(prep_input(EXAMPLE_BUSES)) [(59, 4), (31, 6), (19, 7), (13, 1), (7, 0)] | def prep_input(buses):
return sorted([(bus, offset)
for offset, bus
in enumerate(buses)
if bus], reverse=True) | [
"def dmet_bath_orb_sort(t_list, e_before, c_before):\n\n # Sort the orbital energies (Occupation of 1.0 should come first...)\n new_index = np.maximum(-e_before, e_before - 2.0).argsort()\n\n # Throw away some orbitals above threshold\n thresh_orb = np.sum(-np.maximum(-e_before, e_before - 2.0)[new_index] > 1e-13)\n\n # Determine the number of bath orbitals\n norb = min(np.sum(thresh_orb), t_list[0])\n t_list.append(norb)\n\n # Sort the bath orbitals with its energies\n e_new = e_before[new_index]\n c_new = c_before[ : , new_index]\n \n return e_new, c_new",
"def bsort(seq, cmp):\n sorted = False # assume the seq is not sorted to start with\n while not sorted:\n sorted = True # assume it's already sorted correctly\n for index, value in enumerate(seq): # for every element in seq\n if index > 0: # past the first..\n if not cmp(seq[index-1], value): # if this element is out of order\n sorted = False # then the list is not sorted yet\n seq[index-1], seq[index] = seq[index], seq[index-1] # and swap it",
"def calculate_finishing_order(x):\n\t# Creates a list of keys which are sorted by their values\n\n\treturn [sailor_names for sailor_names,sailorValues in sorted(x.items(), key=lambda y: y[1], reverse=True)]",
"def sort_auto(self):\n key = lambda buz1, buz2: buz1 if buz1.trip_duration <= buz2.trip_duration else buz2\n self.autobuze.sort(key=key)",
"def bordasOf(self, bundle):\n\t\treturn sorted([self.borda[item] for item in bundle], reverse=True)",
"def comb_sort(data):\n shrink_factor = 1.3\n gap = len(data)\n swapped = True\n i = 0\n\n while gap > 1 or swapped:\n # Update the gap value for a next comb\n gap = int(float(gap) / shrink_factor)\n\n swapped = False\n i = 0\n\n while gap + i < len(data):\n if data[i] > data[i + gap]:\n # Swap values\n data[i], data[i + gap] = data[i + gap], data[i]\n swapped = True\n i += 1\n\n return data",
"def sort_currency_list_if_changed(self):\r\n currency_list = self.gox.wallet.keys()\r\n if len(currency_list) == len(self.sorted_currency_list):\r\n return\r\n\r\n # now we will bring base and quote currency to the front and sort the\r\n # the rest of the list of names by acount balance in descending order\r\n if self.gox.curr_base in currency_list:\r\n currency_list.remove(self.gox.curr_base)\r\n if self.gox.curr_quote in currency_list:\r\n currency_list.remove(self.gox.curr_quote)\r\n currency_list.sort(key=lambda name: -self.gox.wallet[name])\r\n currency_list.insert(0, self.gox.curr_quote)\r\n currency_list.insert(0, self.gox.curr_base)\r\n self.sorted_currency_list = currency_list",
"def getRevCodonSeqs(self):\r\n compDict = {'A': 't', 'T': 'a', 'G': 'c', 'C': 'g'} # nuc compliments for reverse strand\r\n revPep = [] # list to hold the temporary reverse peptides before incorporation into the complete list\r\n for seq in self.allPepSeqs:\r\n revSeq = seq[::-1] # reverses the strand to be prepped for nt compliments\r\n for nuc in compDict:\r\n revSeq = revSeq.replace(nuc, compDict[nuc]) # replaces nt's with their compliments\r\n revSeq = revSeq.upper()\r\n revPep.append(revSeq)\r\n for i in revPep:\r\n self.allPepSeqs.append(i) # adds the reverse strand peptide to the list of possible peptide seqs\r\n return",
"def bubbleSort(list):",
"def reversesort(self):\n ...",
"def reorder(items, before):",
"def gen_func_sub_bps(func_bps,sorted_bracket_pairs):\n #Find bracket pairs inside func_bps:\n func_sub_bps = list()\n for fbp in func_bps:\n temp_sub_bps = list()\n for bp in sorted_bracket_pairs:\n if bp[0]>fbp[0] and bp[1]<fbp[1]:\n temp_sub_bps.append(bp)\n func_sub_bps.append(temp_sub_bps)\n return func_sub_bps",
"def reverse_lists(self):\n self.dates.reverse()\n self.opens.reverse()\n self.highs.reverse()\n self.lows.reverse()\n self.closes.reverse()\n self.volumes.reverse()\n self.market_caps.reverse()",
"def sort(self):\n self.intervals.sort()",
"def sort_fasta_by_abundance(fasta_lines, fasta_out_f):\r\n seq_index = {}\r\n count = 0\r\n for seq_id, seq in parse_fasta(fasta_lines):\r\n count += 1\r\n try:\r\n seq_index[seq].append(seq_id)\r\n except KeyError:\r\n seq_index[seq] = [seq_id]\r\n\r\n seqs = []\r\n for k, v in seq_index.items():\r\n seqs.append((len(v), k, v))\r\n del seq_index[k]\r\n seqs.sort()\r\n for count, seq, seq_ids in seqs[::-1]:\r\n for seq_id in seq_ids:\r\n fasta_out_f.write('>%s\\n%s\\n' % (seq_id, seq))",
"def test_sort_reversed():\n data = [5, 4, 3, 2, 1]\n sorted_data = bubble_sort(data)\n\n assert sorted_data == [1, 2, 3, 4, 5]",
"def sort_exons_by_end(self, list_of_exons):\n\n return(sorted(list_of_exons, key=lambda x: x.end, reverse=False))",
"def order_ideal(self, gens):",
"def Bubble_Sort(numlist):\n\n\tfor i in range(len(numlist)):\n\t\tfor j in range(1, len(numlist)):\n\t\t\tif numlist[j] < numlist[j-1]:\n\t\t\t\tnumlist[j], numlist[j-1] = numlist[j-1], numlist[j]\n\n\treturn numlist"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reduce a bunch of periodic signals to a single signal. The value of x that answers the puzzle is the first place ( c + x ) % T = 0, that is to say, c + x = T, or x = T - c. >>> solve_buses(prep_input(EXAMPLE_BUSES)) 1068781 | def solve_buses(prepared_buses):
T, c = functools.reduce(combine_signals, prepared_buses)
return T - c | [
"def bus_electricities(component_resistances: np.ndarray) -> List[OhmicVars]:\n\n buses: List[OhmicVars] = []\n\n # It might be slightly more efficient to do matrix math instead of using a for loop, but this\n # is more readable :)\n for bus_n in range(0, 4):\n # Calculate 1/resistance for each component, then sum them all up to find the resistance on a power bus.\n # We shouldn't have to guard against divide-by-zero here, because resistances shouldn't\n # be zero. If we _do_ divide by zero, we've configured np.seterr elsewhere to raise an exception.\n components_on_this_bus = (electroconstants.COMPONENT_BUS_CONNECTION_MATRIX[bus_n] != 0)\n sum_of_reciprocals = np.sum(np.reciprocal(component_resistances[components_on_this_bus]))\n\n if sum_of_reciprocals == 0.0:\n # There are no components connected, so this simplifies our calculations.\n buses.append(\n OhmicVars(resistance=np.inf, current=0, voltage=electroconstants.HAB_PRIMARY_BUS.nominal_voltage))\n continue\n\n bus_resistance = 1 / sum_of_reciprocals\n\n # Assuming the hab reactor has an internal resistance, calculate the voltage drop given the bus load.\n # This equation is derived from https://en.wikipedia.org/wiki/Internal_resistance, specifically\n # R_reactor_internal = (V_bus_nominal / V_bus_loaded - 1) * R_bus_loaded\n # If we solve for V_bus_loaded (since we know the value of all other variables):\n # V_bus_loaded = V_bus_nominal / (R_reactor_internal / R_bus_loaded + 1)\n bus_voltage = (\n electroconstants.HAB_PRIMARY_BUS.nominal_voltage\n / (electroconstants.HAB_PRIMARY_BUS.primary_power_source.internal_resistance / bus_resistance + 1)\n )\n\n # Ohm's law gives us this.\n bus_current = bus_voltage / bus_resistance\n buses.append(OhmicVars(voltage=bus_voltage, current=bus_current, resistance=bus_resistance))\n\n return buses",
"def noise_reduction_bassan_nipals(setToReduce, numberComponentsToKeep, inverseTransform):\n \n \n if setToReduce.shape[0] < 4097:\n tol = 1e-2\n \n else:\n tol = 1e-1\n \n Tprinc = numpy.ones((setToReduce.shape[0], numberComponentsToKeep))\n Pprinc = numpy.ones((setToReduce.shape[1], numberComponentsToKeep))\n \n j = 0\n while j < numberComponentsToKeep:\n T = Tprinc[:,j].reshape((-1, 1))\n d = 1.0\n \n while d > tol:\n P = setToReduce.T @ T\n P = P / numpy.linalg.norm(P)\n Told = T\n denominator = (P.T @ P)[0, 0]\n T = (setToReduce @ P) / denominator\n d = numpy.linalg.norm(Told - T)\n \n \n setToReduce = setToReduce - (T @ P.T)\n Tprinc[:,j] = T.reshape((-1,))\n Pprinc[:,j] = P.reshape((-1,))\n \n j += 1\n \n if inverseTransform == True:\n result = Tprinc[:, 0:numberComponentsToKeep] @ Pprinc[:, 0:numberComponentsToKeep].T\n \n if not isinstance(result, numpy.ndarray):\n result = numpy.array([[result]])\n \n \n return result\n \n \n return Tprinc[:, 0:numberComponentsToKeep]",
"def part_one_function(self):\n # remove the x's\n self.buses = [x for x in self.buses if x != 'x']\n self.buses = [int(x) for x in self.buses]\n min_time = int(self.buses[0])-1\n min_bus = None\n for bus in self.buses:\n current_time = bus - self.wait_time % bus\n # if current_time == bus, that means it arrives at the same time\n if current_time == bus: current_time = 0\n if current_time < min_time:\n min_time = current_time\n min_bus = bus\n \n # Print the solution\n print(\"--------------------PART 1-----------------------\")\n print(f\"Answer: {min_bus * min_time}\")",
"def get_special_timestamp(buses):\n valid_buses = [(i, bus) for i, bus in enumerate(buses) if bus]\n # Use idea from Chinese Remainder Theorem\n first_bus = valid_buses[0][1]\n ans = 0\n current_product = first_bus\n for i, bus in valid_buses[1:]:\n while (ans + i) % bus != 0:\n ans += current_product\n current_product *= bus\n return ans",
"def get_quickest_bus(departure_time: int, buses: List[int]) -> int:\n quickest_bus = sorted(buses,\n key=lambda x: get_wait_time(departure_time, x),\n reverse=False)[0]\n\n return get_wait_time(departure_time, quickest_bus) * quickest_bus",
"def remove_infeasible_cycles(model, fluxes, fix=()):\n with TimeMachine() as tm:\n # make sure the original object is restored\n tm(do=int, undo=partial(setattr, model, 'objective', model.objective))\n exchange_reactions = model.exchanges\n exchange_ids = [exchange.id for exchange in exchange_reactions]\n internal_reactions = [reaction for reaction in model.reactions if reaction.id not in exchange_ids]\n for exchange in exchange_reactions:\n exchange_flux = fluxes[exchange.id]\n tm(do=partial(setattr, exchange, 'lower_bound', exchange_flux),\n undo=partial(setattr, exchange, 'lower_bound', exchange.lower_bound))\n tm(do=partial(setattr, exchange, 'upper_bound', exchange_flux),\n undo=partial(setattr, exchange, 'upper_bound', exchange.upper_bound))\n cycle_free_objective_list = []\n for internal_reaction in internal_reactions:\n internal_flux = fluxes[internal_reaction.id]\n if internal_flux >= 0:\n cycle_free_objective_list.append(Mul._from_args((FloatOne, internal_reaction.forward_variable)))\n tm(do=partial(setattr, internal_reaction, 'lower_bound', 0),\n undo=partial(setattr, internal_reaction, 'lower_bound', internal_reaction.lower_bound))\n tm(do=partial(setattr, internal_reaction, 'upper_bound', internal_flux),\n undo=partial(setattr, internal_reaction, 'upper_bound', internal_reaction.upper_bound))\n else: # internal_flux < 0:\n cycle_free_objective_list.append(Mul._from_args((FloatOne, internal_reaction.reverse_variable)))\n tm(do=partial(setattr, internal_reaction, 'lower_bound', internal_flux),\n undo=partial(setattr, internal_reaction, 'lower_bound', internal_reaction.lower_bound))\n tm(do=partial(setattr, internal_reaction, 'upper_bound', 0),\n undo=partial(setattr, internal_reaction, 'upper_bound', internal_reaction.upper_bound))\n\n cycle_free_objective = model.solver.interface.Objective(\n Add._from_args(cycle_free_objective_list), direction=\"min\", sloppy=True\n )\n model.objective = cycle_free_objective\n\n for reaction_id in fix:\n reaction_to_fix = model.reactions.get_by_id(reaction_id)\n tm(do=partial(setattr, reaction_to_fix, 'lower_bound', fluxes[reaction_id]),\n undo=partial(setattr, reaction_to_fix, 'lower_bound', reaction_to_fix.lower_bound))\n tm(do=partial(setattr, reaction_to_fix, 'upper_bound', fluxes[reaction_id]),\n undo=partial(setattr, reaction_to_fix, 'upper_bound', reaction_to_fix.upper_bound))\n\n try:\n solution = model.solve()\n except SolveError as e:\n logger.warning(\"Couldn't remove cycles from reference flux distribution.\")\n raise e\n result = solution.x_dict\n return result",
"def filter_buses(list_of_buses):\n for bus in list_of_buses:\n return bus",
"def oscillator_bifurcation(ders, inp_min, inp_max, d_inp, warmup_time=1000.0, short_warmup=200.0, time_window=500, dt=0.01):\n\tprint(\"Calculating biffurcation diagram...\")\n\tbif = [[],[]] # bifurcation list\n\t# initial conditions\n\tstate = [0.0 for i in range(len(ders))]\n\tstate[0] = 1.0\n\t# warmup\n\tfor i in range(round(warmup_time/dt)):\n\t\tstate = one_step_integrator(state, ders, inp_min, dt)\n\t# inp loop\n\tfor inp in [inp_min+d_inp*i for i in range(floor((inp_max-inp_min)/d_inp))]:\n\t\tprint(\"\\tb = \", \"%.3f\" % inp, \"/\", inp_max)\n\t\txhhhh = state[0] # pre-pre-pre-previous value\n\t\txhhh = state[0] # pre-pre-previous value\n\t\txhh = state[0] # pre-previous value\n\t\txh = state[0] # previous value\n\t\t# warmup\n\t\tfor i in range(round(short_warmup/dt)):\n\t\t\tstate = one_step_integrator(state, ders, inp, dt)\n\t\tfor t in range(round(time_window/dt)):\n\t\t\txhhhh = xhhh\n\t\t\txhhh = xhh\n\t\t\txhh = xh\n\t\t\txh = state[0]\n\t\t\tstate = one_step_integrator(state, ders, inp, dt)\n\t\t\tif(xhhhh < xhhh and xhhh < xhh and xhh > xh and xh > state[0]): # 5 point local maximum\n\t\t\t\tbif[0].append(inp)\n\t\t\t\tbif[1].append(xhh)\n\treturn bif",
"def _decode_sum_of_sincs(self, spikes):\n\n q = self.get_measurement_vector(spikes)\n integral_measurement_matrix = self._get_sinc_integral_matrix(spikes)\n flat_fwd_mixing = self._flatten_mixing_matrix(self.mixing_matrix)\n flat_bwd_mixing = self._flatten_mixing_matrix(\n np.linalg.pinv(self.mixing_matrix)\n )\n PCS_sampler = self._get_PCS_sampler(spikes)\n\n operator_inverse = np.linalg.pinv(\n flat_bwd_mixing.dot(PCS_sampler)\n .dot(integral_measurement_matrix)\n .dot(flat_fwd_mixing)\n )\n\n x_sinc_amps = (\n operator_inverse.dot(flat_bwd_mixing).dot(PCS_sampler).dot(q)\n ).reshape((self.n_signals, len(self.sinc_locs)))\n\n return src.signals.bandlimitedSignals(self.Omega, self.sinc_locs, x_sinc_amps)",
"def _update_boost(self):\r\n\t\t\r\n\t\tfor i, active in enumerate(self.outputs):\r\n\t\t\tif int(np.sum(active)) >= self.min_duty_cycle:\r\n\t\t\t\tself.boost[i] += self.boost_inc\r\n\t\t\telse:\r\n\t\t\t\tself.boost[i] = max(self.boost[i] - self.boost_dec, 0)",
"def _expand_buses(pins_nets_buses):\n pins_nets = []\n for pnb in pins_nets_buses:\n if isinstance(pnb, Bus):\n pins_nets.extend(pnb.get_nets())\n else:\n pins_nets.append(pnb)\n return pins_nets",
"def bumps(x):\r\n K = lambda x : (1. + np.abs(x)) ** -4.\r\n t = np.array([[.1, .13, .15, .23, .25, .4, .44, .65, .76, .78, .81]]).T\r\n h = np.array([[4, 5, 3, 4, 5, 4.2, 2.1, 4.3, 3.1, 2.1, 4.2]]).T\r\n w = np.array([[.005, .005, .006, .01, .01, .03, .01, .01, .005, .008, .005]]).T\r\n return np.sum(h*K((x-t)/w), axis=0)",
"def bumps(x):\n K = lambda x : (1. + np.abs(x)) ** -4.\n t = np.array([[.1, .13, .15, .23, .25, .4, .44, .65, .76, .78, .81]]).T\n h = np.array([[4, 5, 3, 4, 5, 4.2, 2.1, 4.3, 3.1, 2.1, 4.2]]).T\n w = np.array([[.005, .005, .006, .01, .01, .03, .01, .01, .005, .008, .005]]).T\n return np.sum(h*K((x-t)/w), axis=0)",
"def prep_input(buses):\n return sorted([(bus, offset) \n for offset, bus \n in enumerate(buses) \n if bus], reverse=True)",
"def compute_bias(ics, vbc):\n import os, time\n from seren3.array import SimArray\n \n # Compute size of grid and boxsize (for this patch)\n N = vbc.shape[0]\n boxsize = ics.boxsize.in_units(\"Mpc a h**-1\") * (float(N) / float(ics.header.N))\n\n # Compute vbc @ z=1000\n z = ics.z\n rms = vbc_rms(vbc)\n rms_recom = rms * (1001./z)\n\n # Check for PS and run CICsASS if needed\n fname_vbc0 = vbc_ps_fname(0., z, boxsize)\n if not os.path.isfile(fname_vbc0):\n exit_code = run_cicsass(boxsize, z, 0., fname_vbc0)\n\n fname_vbcrecom = vbc_ps_fname(rms_recom, z, boxsize)\n if not os.path.isfile(fname_vbcrecom):\n exit_code = run_cicsass(boxsize, z, rms_recom, fname_vbcrecom)\n\n # Load power spectra and compute bias\n ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n # Should have same lenghts if finished writing\n count = 0\n while len(ps_vbcrecom[1]) != len(ps_vbc0[1]):\n count += 1\n if count > 10:\n raise Exception(\"Reached sleep limit. Filesizes still differ\")\n time.sleep(5)\n ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n #CDM bias\n b_cdm = ps_vbcrecom[1] / ps_vbc0[1]\n # Baryon bias\n b_b = ps_vbcrecom[2] / ps_vbc0[2]\n # Wavenumber\n k_bias = SimArray(ps_vbcrecom[0] / ics.cosmo[\"h\"], \"h Mpc**-1\")\n\n return k_bias, b_cdm, b_b",
"def drag_schedules(beta_list, qubits, pulse_amp, pulse_width,\n pulse_sigma=None,\n width_sigma_ratio=4, drives=None, cmd_def=None,\n inst_map=None, meas_map=None):\n\n xdata = beta_list\n\n # copy the instruction to schedule mapping\n inst_map = copy.deepcopy(inst_map)\n if not inst_map:\n inst_map = copy.deepcopy(cmd_def)\n\n if pulse_sigma is None:\n pulse_sigma = pulse_width / width_sigma_ratio\n\n # Construct the circuits\n qr = qiskit.QuantumRegister(max(qubits) + 1)\n cr = qiskit.ClassicalRegister(len(qubits))\n\n circuits = []\n\n for circ_index, b_amp in enumerate(beta_list):\n\n circ = qiskit.QuantumCircuit(qr, cr)\n circ.name = 'dragcircuit_%d_0' % circ_index\n\n for qind, qubit in enumerate(qubits):\n\n # positive drag pulse\n drag_pulse = pulse_lib.drag(duration=pulse_width,\n amp=pulse_amp[qind],\n beta=b_amp,\n sigma=pulse_sigma,\n name='drag_pulse_%d_%d' % (circ_index,\n qubit))\n\n drag_gate = Gate(name='drag_%d_%d' % (circ_index, qubit),\n num_qubits=1, params=[])\n\n # add commands to schedule\n schedule = pulse.Schedule(name='drag_pulse_%f_%d' % (b_amp,\n qubit))\n\n schedule += drag_pulse(drives[qubit])\n\n # append this schedule to the inst_map\n inst_map.add('drag_%d_%d' % (circ_index, qubit), qubits=[qubit],\n schedule=schedule)\n\n # negative pulse\n drag_pulse2 = pulse_lib.drag(duration=pulse_width,\n amp=-1*pulse_amp[qind],\n beta=b_amp,\n sigma=pulse_sigma,\n name='drag_pulse_%d_%d' % (circ_index,\n qubit))\n\n drag_gate2 = Gate(name='drag2_%d_%d' % (circ_index, qubit),\n num_qubits=1, params=[])\n\n # add commands to schedule\n schedule2 = pulse.Schedule(name='drag_pulse2_%f_%d' % (b_amp,\n qubit))\n\n schedule2 += drag_pulse2(drives[qubit])\n\n # append this schedule to the inst_map\n inst_map.add('drag2_%d_%d' % (circ_index, qubit), qubits=[qubit],\n schedule=schedule2)\n\n circ.append(drag_gate, [qr[qubit]])\n # circ.u1(np.pi, [qr[qubit]])\n circ.append(drag_gate2, [qr[qubit]])\n\n for qind, qubit in enumerate(qubits):\n circ.measure(qr[qubit], cr[qind])\n\n circuits.append(circ)\n\n # schedule\n schedule_config = ScheduleConfig(inst_map, meas_map)\n drag_sched = [schedule_circuit(qcirc,\n schedule_config)\n for qcirc in circuits]\n\n return drag_sched, xdata",
"def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True",
"def reduce_B(self, B_on_standard_basis_array):\n # TODO: Check this description, then move to docstring\n #To see this dt effect, consider:\n #\n #dx/dt = Ax+Bu, approximate as (x^(k+1)-x^k)/dt = Ax^k + Bu^k.\n #Rearranging terms, x^(k+1) = (I+dt*A)x^k + dt*Bu^k.\n #The impulse response is: x^0=0, u^0=1, and u^k=0 for k>=1.\n #Thus x^1 = dt*B, x^2 = dt*(I+dt*A)*B, ...\n #and y^1 = dt*C*B, y^2 = dt*C*(I+dt*A)*B, ...\n #However, the impulse response to the true discrete-time system is\n #x^1 = B, x^2 = A_d*B, ...\n #and y^1 = CB, y^2 = CA_d*B, ...\n #(where I+dt*A ~ A_d)\n #The important thing to see is the factor of dt difference.\n\n self.B_reduced = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, B_on_standard_basis_array)\n if not self.is_basis_orthonormal:\n self.B_reduced = self._get_proj_mat() * self.B_reduced\n return self.B_reduced",
"def simulate_strategy_loop_known(\n num_buses,\n states,\n decisions,\n utilities,\n costs,\n ev,\n increments,\n num_periods,\n beta,\n unobs,\n):\n for period in range(num_periods):\n for bus in range(num_buses):\n\n old_state = states[bus, period]\n if (-costs[old_state, 0] + unobs[bus, period, 0] + beta * ev[old_state]) > (\n -costs[0, 0] - costs[0, 1] + unobs[bus, period, 1] + beta * ev[0]\n ):\n decision = 0\n utility = -costs[old_state, 0] + unobs[bus, period, 0]\n new_state = old_state + increments[bus, period]\n else:\n decision = 1\n utility = -costs[0, 0] - costs[0, 1] + unobs[bus, period, 1]\n new_state = increments[bus, period]\n\n decisions[bus, period] = decision\n utilities[bus, period] = utility\n states[bus, period + 1] = new_state\n return states, decisions, utilities"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method opening all images to test their validity. | def verify_images(root_dir, root_listdir):
counter = 0
for index, image_dir in enumerate(root_listdir):
images_listdir = os.listdir(root_dir + "/" + image_dir)
list_of_images_indices = [
image_index
for image_index in range(3, len(images_listdir) - 1)
if image_index % 2 == 0
]
for image_ind in list_of_images_indices:
filename = root_dir + "/" + image_dir + "/" + images_listdir[image_ind]
try:
im = Image.open(filename)
im.verify()
im.close()
except (OSError, ValueError):
counter += 1
print("%d files caused error due to OSError and ValueError." % counter) | [
"def _open_images(self):\n filenames, _ = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Select image(s)...',\n directory=self._last_visited_dir, # home dir by default\n )\n self._add_files(filenames)",
"def _check_img_inversion(self):\n for image in [self.image_open, self.image_dmlc]:\n image.check_inversion()",
"def images_exist(self):\n pass",
"def list_open_images():\n\tid = 0\n\tif len(open_images) > 0:\n\t\tfor i in open_images:\n\t\t\tfilename = os.path.basename(i.get_filename())\n\t\t\tprint(id, filename)\n\t\t\tid += 1\n\telse:\n\t\tprint(\"No open images\")",
"def ensure_valid_images(self):\n expected_polygons = len(setPolygonCoordinates(1000,600)) # inseted values are placeholders\n unique_calib_successes = set(self.calib_successes)\n if len(unique_calib_successes) != expected_polygons:\n valid = set(np.arange(0,expected_polygons))\n missing = valid - unique_calib_successes\n arg_value = ' '.join(map(str, missing))\n raise AssertionError(\"Missing valid image sets for %i polygons. Re-run calibration with the\\n'-p %s' argument to re-capture images for these polygons.\" % (len(missing), arg_value))\n else:\n return True",
"def check_files(self):\n print('checking files')\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)",
"def test_all_images(self):\n mysite = self.get_site()\n ai = list(mysite.allimages(total=10))\n self.assertLessEqual(len(ai), 10)\n for image in ai:\n self.assertIsInstance(image, pywikibot.FilePage)\n\n for impage in mysite.allimages(start='Ba', total=5):\n self.assertIsInstance(impage, pywikibot.FilePage)\n self.assertTrue(impage.exists())\n self.assertGreaterEqual(impage.title(with_ns=False), 'Ba')\n # Bug T17985 - reverse and start combined; fixed in v 1.14\n for impage in mysite.allimages(start='Da', reverse=True, total=5):\n self.assertIsInstance(impage, pywikibot.FilePage)\n self.assertTrue(impage.exists())\n self.assertLessEqual(impage.title(with_ns=False), 'Da')\n for impage in mysite.allimages(prefix='Ch', total=5):\n self.assertIsInstance(impage, pywikibot.FilePage)\n self.assertTrue(impage.exists())\n self.assertTrue(impage.title(with_ns=False).startswith('Ch'))\n for impage in mysite.allimages(minsize=100, total=5):\n self.assertIsInstance(impage, pywikibot.FilePage)\n self.assertTrue(impage.exists())\n self.assertGreaterEqual(impage.latest_file_info['size'], 100)\n for impage in mysite.allimages(maxsize=2000, total=5):\n self.assertIsInstance(impage, pywikibot.FilePage)\n self.assertTrue(impage.exists())\n self.assertLessEqual(impage.latest_file_info['size'], 2000)",
"def select_images(self):\n files = QtGui.QFileDialog.getOpenFileNames(self,\n \"Select Your Image(s)\",\n self.cwd_open,\n \"Images (*.png *.jpg)\")\n for file in files:\n if os.path.isfile(file):\n self.cwd_open = os.path.dirname(file)\n image = Image.open(file)\n image = image.convert(\"RGBA\")\n self.add_image(HexifiedImage(image, os.path.basename(file)[0]))",
"def test_get_image_if_valid():\n\n # our image opened directly\n actual_img = Image.open(os.path.join(TEST_FOLDER, \"testmap.png\"))\n\n # image opened with the method in utilities\n with replace_stdin(io.StringIO(TEST_IMAGE)):\n test_img = utilities.get_image_if_valid(\"test prompt\", \"testmap.png\")\n\n # ensure they are the same size\n assert test_img.size == test_img.size\n # ensure the unique color lists are the same\n assert utilities.get_unique_color_list(\n test_img) == utilities.get_unique_color_list(actual_img)\n # ensure the whole map lists are the same\n assert utilities.get_color_map_list(\n test_img) == utilities.get_color_map_list(actual_img)",
"def check_files(self):\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)",
"def check_images():\n saved_stdout, saved_stderr = sys.stdout, sys.stderr\n\n out, err = StringIO(), StringIO()\n try:\n sys.stdout, sys.stderr = out, err\n check_images_main()\n except SystemExit:\n pass\n finally:\n stdout, stderr = out.getvalue().strip(), err.getvalue().strip()\n sys.stdout, sys.stderr = saved_stdout, saved_stderr\n\n return stdout, stderr",
"def _load_images(self):\n if not self.test_set:\n images = []\n masks = []\n for item in self.image_names:\n image = nrrd.read(os.path.join(self.input_folder, 'image', item))[0]\n mask = nrrd.read(os.path.join(self.input_folder, 'mask', item))[0]\n \n images.append(image)\n masks.append(mask)\n \n self.images = images\n self.masks = masks\n else:\n images = []\n for item in self.image_names:\n image = nrrd.read(os.path.join(self.input_folder, 'image', item))[0] \n images.append(image)\n \n self.images = images \n \n print(f\"Loaded {len(self.images)} images and {len(self.masks)} binary masks.\")",
"def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = marccdimage()\n obj.read(self.fn[name])\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin\")\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax\")\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean\")\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev\")\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")",
"def run_test_show_images():\n print()\n print('--------------------------------------------------')\n print('Testing the show_images function:')\n print('--------------------------------------------------')\n\n # All of these images are exactly 178 by 128 pixels, which is the exact\n # screen resolution of the BRICK. They are made by Lego and ship\n # with the Lego Mindstorm EV3 Home Edition software.\n\n eyes = \\\n \"/home/robot/csse120/assets/images/ev3_lego/eyes_neutral.bmp\"\n angry_eyes = \\\n \"/home/robot/csse120/assets/images/ev3_lego/eyes_angry.bmp\"\n puppy_dog_eyes = \\\n \"/home/robot/csse120/assets/images/ev3_lego/eyes_disappointed.bmp\"\n sad_eyes = \\\n \"/home/robot/csse120/assets/images/ev3_lego/eyes_hurt.bmp\"\n shifty_eyes = \\\n \"/home/robot/csse120/assets/images/ev3_lego/eyes_pinch_left.bmp\"\n progress_0 = \\\n \"/home/robot/csse120/assets/images/ev3_lego/progress_bar_0.bmp\"\n progress_50 = \\\n \"/home/robot/csse120/assets/images/ev3_lego/progress_bar_50.bmp\"\n progress_100 = \\\n \"/home/robot/csse120/assets/images/ev3_lego/progress_bar_100.bmp\"\n teary_eyes = \\\n \"/home/robot/csse120/assets/images/ev3_lego/eyes_tear.bmp\"\n\n # Make Python images (using the PIL library) from the files:\n files = [eyes, angry_eyes, puppy_dog_eyes, sad_eyes, shifty_eyes,\n progress_0, progress_50, progress_100, teary_eyes]\n images = []\n for k in range(len(files)):\n images.append(Image.open(files[k]))\n\n print()\n input('Press ENTER on your keyboard when you are ready to see images'\n + ' on the ** BRICK\\'s ** screen:')\n\n print()\n\n show_images(images)\n\n print()\n print('Look at the TESTING CODE to see the names of the files')\n print('that contain the IMAGES that you saw.')\n print()",
"def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n shape = dim2, dim1\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = marccdimage()\n obj.read(self.fn[name])\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin\")\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax\")\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean\")\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev\")\n self.assertEqual(shape, obj.shape, \"dim1\")",
"def test_get_images_and_exract_attributes(self):\n for provider in IMAGE_PROVIDERS:\n current_provider = IMAGE_PROVIDERS[provider]\n images = current_provider.get_images(1)\n for image in images:\n image_attributes = current_provider.get_image_attributes(image)\n self.assertIsInstance(image_attributes, data_types.ImageAttributes)\n self.assertIsInstance(image_attributes.image_id, (str, type(None)))\n self.assertIsInstance(image_attributes.url, (str, type(None)))\n self.assertIsInstance(image_attributes.image_type, data_types.ImageType)\n self.assertIsInstance(image_attributes.date_shot, datetime)\n self.assertIsInstance(image_attributes.attribution, str)\n self.assertIsInstance(image_attributes.format, (str, type(None)))\n self.assertIsInstance(image_attributes.height_pixels, (int, type(None)))\n self.assertIsInstance(image_attributes.width_pixels, (int, type(None)))\n self.assertIsInstance(image_attributes.latitude, float)\n self.assertIsInstance(image_attributes.longitude, float)",
"def check_images(image_dir, image_types =\\\n ('*.jpg', '*.png', '*.bmp', '*.JPG', '*.BMP', '*.PNG')): \n # index all `image_types` in source path\n file_list = []\n for imtype in image_types:\n pattern = os.path.join(image_dir, imtype)\n file_list.extend(glob.glob(pattern))\n print 'Found', len(file_list), 'images'\n \n image_names_err = []\n image_names_all = []\n for (i, file_path) in enumerate(file_list):\n if i % (len(file_list)/20) == 0: print i,\n elif i % (len(file_list)/1000) == 0: print '.',\n\n try: \n file_dir, file_name = os.path.split(file_path)\n file_body, file_ext = os.path.splitext(file_name)\n image_names_all.append(file_name)\n load_img(file_path) # try to load\n except:\n image_names_err.append(file_name) \n return (image_names_err, image_names_all)",
"def number_of_images_a_valid():\r\n counter = 0\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n counter += 1\r\n if counter >= int(number_of_images_a.get()):\r\n return True\r\n else:\r\n messagebox.showwarning(\"Invalid Image Inputs\", (\r\n \"Not enough images of type a to create \"\r\n \"requested grid.\"))\r\n return False",
"def _DownloadImagesForResolutionGrouping(self) -> None:\n assert self._expectations\n test_expectations = self._expectations.get('primary',\n {}).get(self._test_name, {})\n positive_digests = [\n digest for digest, status in test_expectations.items()\n if status == 'positive'\n ]\n if not positive_digests:\n raise RuntimeError('Failed to find any positive digests for test %s' %\n self._test_name)\n for digest in positive_digests:\n content = self._DownloadImageWithDigest(digest)\n image = Image.open(io.BytesIO(content))\n self._images['%dx%d' % (image.size[0], image.size[1])].add(digest)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
for a given template and list of extensions, find every file related to that template which has one of the extensions. | def find_template_companion_files(template: Path, extensions: Iterable[str], recurse_up_to: Path = None) -> Set[Path]:
files_to_check = []
# Get a list of all file names to look for in each folder
data_file_names = []
basename = template.name.split('.')[0]
for i in range(len(template.suffixes)):
ext = ''.join(template.suffixes[:i+1])
for data_file_ext in extensions:
data_file_names.append(Path(basename + ext).with_suffix(data_file_ext))
# Look for those files in the template's current folder (a.k.a. parent directory)
files_to_check.extend([template.parent / file_name for file_name in data_file_names])
if recurse_up_to and recurse_up_to in template.parents:
# Look for those files in every parent directory up to `recurse_up_to`,
# excluding the template's parent directory which has already been checked
relative_path = template.parent.relative_to(recurse_up_to)
for folder in relative_path.parents:
for file in data_file_names:
files_to_check.append(recurse_up_to / folder / file)
return set([file for file in files_to_check if file.is_file()]) | [
"def _get_templates():\n SRC = _ARGS['SRC']\n TEMPLATE = _ARGS['TEMPLATE']\n \n templates = []\n files = list_files(SRC)\n for filename in files:\n name, extension = os.path.splitext(filename)\n if extension == TEMPLATE:\n templates.append(name + extension)\n if len(templates) == 0:\n raise Exception('No template \\'%s\\' files found.\\n\\t%s\\n\\t%s' %\n (TEMPLATE, SRC, files))\n return templates",
"def filter_files_by_extension(\n files: list ,\n extensions: list\n):\n filtered_files = []\n for file in files:\n file_ext = os.path.splitext(file)[-1].lower()\n file_ext = _remove_dot_from_extension(file_ext)\n for extension in extensions:\n ext = _remove_dot_from_extension(extension).lower()\n # print(\"ext \\n\", ext)\n # print(\"file_ext \\n\", file_ext)\n if file_ext == ext:\n filtered_files.append(file)\n\n return filtered_files\n ...",
"def list_template_dir(\n metafunc, extensions, exclude_nested=True, template_type=\"\", sub_dirs=None\n):\n sub_dirs = [] if sub_dirs is None else sub_dirs\n filenames = []\n nested_files = []\n filenames = list_filenames_in_template_dir(\n metafunc, extensions, template_type, sub_dirs\n )\n if exclude_nested:\n nested_files = get_nested_files(filenames)\n return list(set(filenames) - set(nested_files))",
"def find_template_files(dir_name):\n list_files = []\n for dirName, subdirList, fileList in os.walk(dir_name):\n # Construct file path relative to the dir_name.\n for file_name in fileList:\n fp = os.path.join(dirName, file_name)\n r = re.compile(\".+\\.template$\")\n if r.match(fp): # if the file is a .template...\n # Save the template file for later.\n print_debug(\"Found template file {}\".format(fp))\n list_files.append(fp)\n return list_files",
"def find_files(extensions):\n\n return [fname for fname in os.listdir('.') if fname.endswith(extensions)]",
"def filter_by_extensions(files: tp.Sequence[pathlib.Path],\n extensions: tp.List[str]) -> tp.List[pathlib.Path]:\n return [file for file in files if \"\".join(file.suffixes) in extensions]",
"def _get_contents_by_ext(self, collection):\n contents_by_ext = defaultdict(list)\n collection_dir = os.path.join(self.root_dir, collection)\n for name in sorted(os.listdir(collection_dir)):\n path = os.path.join(collection_dir, name)\n if os.path.isfile(path):\n root, ext = os.path.splitext(name)\n contents_by_ext[ext].append(root)\n return contents_by_ext",
"def get_files_matching(extensions: Iterable[str], base_path: str = \"\", relative: bool = False):\n\n files = []\n for extension in extensions:\n files_matching = glob.glob(\n f\"{base_path}**/*.{extension}\", recursive=True)\n files.extend(\n map(os.path.abspath if not relative else lambda file: file,\n files_matching)\n )\n\n return files",
"def find_alternative_files(filepath, extensions_map):\n filepath = Path(filepath)\n try:\n all_paths = files_with_matching_stem(filepath)\n base_names = filter_extensions(all_paths, extensions_map)\n if len(base_names) > 1:\n print(\"Warning: More than one file was found: \"\n \"{}. Taking the first by default\".format(base_names))\n return base_names[0]\n except Exception as e:\n raise ImportError(\"Failed to find a file for {} from types {}. \"\n \"Reason: {}\".format(filepath, extensions_map, e))",
"def get_templates(template_folder, search_term=''):\n return [template for template in os.listdir(template_folder)\n if search_term in template]",
"def find_exts(\n files: List[str],\n) -> Tuple[List[str], Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]]:\n wav_files = {}\n other_audio_files = {}\n lab_files = {}\n textgrid_files = {}\n identifiers = []\n for full_filename in files:\n filename, fext = os.path.splitext(full_filename)\n fext = fext.lower()\n if fext == \".wav\":\n wav_files[filename] = full_filename\n elif fext == \".lab\":\n lab_files[filename] = full_filename\n elif (\n fext == \".txt\" and filename not in lab_files\n ): # .lab files have higher priority than .txt files\n lab_files[filename] = full_filename\n elif fext == \".textgrid\":\n textgrid_files[filename] = full_filename\n elif fext in supported_audio_extensions and shutil.which(\"sox\") is not None:\n other_audio_files[filename] = full_filename\n if filename not in identifiers:\n identifiers.append(filename)\n return identifiers, wav_files, lab_files, textgrid_files, other_audio_files",
"def match_template(blobs, templates):\n results = list()\n for blob in blobs:\n template = next(t for t in templates if t['compound'] == blob['compound'])\n results.append({\n 't1': template['first'],\n 't2': template['second'],\n 'b1': blob['first'],\n 'b2': blob['second']\n })\n print(results)\n return results",
"def filter_extensions(filepaths, extensions_map):\n extensions = extensions_map.keys()\n return [f.name for f in filepaths if f.suffix in extensions]",
"def list_templates(self):\n templates = set()\n for p in self.get_searchpath():\n for dirpath, dirnames, filenames in walk(p):\n dirpath = dirpath[len(p) + 1:]\n if dirpath.startswith('.'):\n continue\n for filename in filenames:\n if filename.startswith('.'):\n continue\n templates.add(path.join(dirpath, filename).\n replace(path.sep, '/'))\n return sorted(templates)",
"def collect_files_with_extensions(self, extension: str) -> List[str]:\n occurrences = []\n for position in os.listdir(self.directory):\n if os.path.isdir(position):\n for file in os.listdir(position):\n if os.path.isfile(os.path.join(position, file)) and file.endswith(\n extension\n ):\n occurrences.append(os.path.join(self.directory, position, file))\n return occurrences",
"def extension_templates(self) -> List[str]:\n default = [self.extension_file(), \"mako\"]\n return self.options.get(\"extensions\").get(\"templates\", default)",
"def select_by_ext(self, ext, recursive=True):\n ext = [ext.strip().lower() for ext in ensure_list(ext)]\n\n def filters(p): return p.suffix.lower() in ext\n\n return self.select_file(filters, recursive)",
"def find_templates(input_dir):\n templates = []\n\n def template_finder(result, dirname):\n for obj in os.listdir(dirname):\n if obj.endswith('.mustache'):\n result.append(os.path.join(dirname, obj))\n\n dir_visitor(\n input_dir,\n functools.partial(template_finder, templates)\n )\n return templates",
"def handle_extensions(self, extensions=('html',)):\r\n ext_list = []\r\n for ext in extensions:\r\n ext_list.extend(ext.replace(' ', '').split(','))\r\n for i, ext in enumerate(ext_list):\r\n if not ext.startswith('.'):\r\n ext_list[i] = '.%s' % ext_list[i]\r\n return set(ext_list)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform x elementwise through an affine function y = exp(s)x + t where s = st[...,0] and t = st[...,1] with s.shape == x.shape == t.shape. The Jacobian for this transformation is the coordinatewise product of the scaling factors J = prod(es[...,i],i) | def element_wise_affine(x, st, compute_jacobian=True):
es = torch.exp(st[..., 0])
t = st[..., 1]
logj = None
if compute_jacobian:
logj = torch.sum(torch.log(es), dim=-1)
return es * x + t, logj | [
"def inverse_element_wise_affine(x, st, compute_jacobian=True):\n es = torch.exp(-st[..., 0])\n t = st[..., 1]\n logj = None\n if compute_jacobian:\n logj = torch.sum(torch.log(es), dim=-1)\n\n return es * (x - t), logj",
"def affine_transform(pt, t):\n new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2]",
"def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _",
"def apply_affine(affine, points):\n return [tform_point(affine, p) for p in points]",
"def _affine(self, x, a, rev=False):\n\n # the entire coupling coefficient tensor is scaled down by a\n # factor of ten for stability and easier initialization.\n a *= 0.1\n ch = x.shape[1]\n\n sub_jac = self.clamp * self.tanh(a[:, :ch])\n if self.GIN:\n sub_jac -= self.mean_keep_dims(sub_jac, self.sum_dims)\n\n output_result = None\n if not rev:\n output_result = (x * self.exp(sub_jac) + a[:, ch:], self.sum(sub_jac, self.sum_dims))\n else:\n output_result = ((x - a[:, ch:]) * ops.exp(-sub_jac), -self.sum(sub_jac, self.sum_dims))\n return output_result",
"def temporal_affine(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n return out",
"def affine_transform(geom, matrix):\n if geom.is_empty:\n return geom\n if len(matrix) == 6:\n ndim = 2\n a, b, d, e, xoff, yoff = matrix\n if geom.has_z:\n ndim = 3\n i = 1.0\n c = f = g = h = zoff = 0.0\n matrix = a, b, c, d, e, f, g, h, i, xoff, yoff, zoff\n elif len(matrix) == 12:\n ndim = 3\n a, b, c, d, e, f, g, h, i, xoff, yoff, zoff = matrix\n if not geom.has_z:\n ndim = 2\n matrix = a, b, d, e, xoff, yoff\n else:\n raise ValueError(\"'matrix' expects either 6 or 12 coefficients\")\n\n def affine_pts(pts):\n \"\"\"Internal function to yield affine transform of coordinate tuples\"\"\"\n if ndim == 2:\n for x, y in pts:\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n yield (xp, yp)\n elif ndim == 3:\n for x, y, z in pts:\n xp = a * x + b * y + c * z + xoff\n yp = d * x + e * y + f * z + yoff\n zp = g * x + h * y + i * z + zoff\n yield (xp, yp, zp)\n\n # Process coordinates from each supported geometry type\n if geom.type in ('Point', 'LineString', 'LinearRing'):\n return type(geom)(list(affine_pts(geom.coords)))\n elif geom.type == 'Polygon':\n ring = geom.exterior\n shell = type(ring)(list(affine_pts(ring.coords)))\n holes = list(geom.interiors)\n for pos, ring in enumerate(holes):\n holes[pos] = type(ring)(list(affine_pts(ring.coords)))\n return type(geom)(shell, holes)\n elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':\n # Recursive call\n # TODO: fix GeometryCollection constructor\n return type(geom)([affine_transform(part, matrix)\n for part in geom.geoms])\n else:\n raise ValueError('Type %r not recognized' % geom.type)",
"def affine_mult(affine, coordinates):\n return np.dot(coordinates, affine[:3, :3].T) + affine[:3, -1]",
"def fit_affine(src, dst, weg=np.array([])):\n\n # -------------------------------\n # Normalize points\n # -------------------------------\n src, Ts = normalize_points(src)\n dst, Td = normalize_points(dst)\n\n # -------------------------------\n # Set parameters\n # -------------------------------\n # Get number of d-dimensional points\n n = len(src)\n # Check if weights are provided\n if not weg.any():\n weg = np.ones(n)\n\n # -------------------------------\n # Construct matrix A and vector b\n # -------------------------------\n A = np.zeros([2*n,6])\n b = np.zeros([2*n,1])\n W = np.zeros([2*n,2*n])\n for i in xrange(n):\n # Construct matrix A\n # ax = (x, y, 1, 0, 0, 0)\n # ay = (0, 0, 0, x, y, 1)\n A[i*2 ,:] = np.array([ src[i,0], src[i,1], 1, 0, 0, 0 ])\n A[i*2+1,:] = np.array([ 0, 0, 0, src[i,0], src[i,1], 1 ])\n # Construct vector b\n b[i*2 ] = dst[i,0]\n b[i*2+1] = dst[i,1]\n # Construct diagonal weigh matrix\n W[i*2 ,i*2 ] = weg[i]\n W[i*2+1,i*2+1] = weg[i]\n\n # -------------------------------\n # Estimate solution x\n # -------------------------------\n x = np.linalg.solve( (A.T.dot(W)).dot(A) , (A.T.dot(W)).dot(b) )\n T = np.vstack([np.reshape(x,(2,3)) ,[0,0,1]])\n\n # -------------------------------\n # Denormalize transformation\n # -------------------------------\n T, _, _, _ = np.linalg.lstsq(Td,T.dot(Ts))\n\n return T",
"def affine_transform(mtx=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])):\r\n\r\n def tfm(data):\r\n xy = data[:, :2]\r\n return (mtx @ np.vstack((xy.T[:2], np.ones(xy.T.shape[1]))))[:2]\r\n\r\n return tfm",
"def transform_affine(self, fixed):\n if len(fixed) == 2:\n label0 = fixed[0][0]\n label1 = fixed[1][0]\n\n p1 = self.points[label0]\n p2 = self.points[label1]\n p3 = np.array(fixed[0][1:3])\n p4 = np.array(fixed[1][1:3])\n\n theta = angle_between(p1, p2, p3, p4) * np.pi / 180\n\n scale = dist(p3, p4) / dist(p1, p2)\n s = np.sin(theta)\n c = np.cos(theta)\n rot = np.array([[c, -s],\n [s, c]]) * scale\n\n labels = self.points.keys()\n for label in labels:\n xy = self.points[label]\n xy2 = np.dot(rot, xy - p1) + p3\n self.points[label] = xy2\n\n elif len(fixed) > 2:\n mat = np.zeros((2*len(fixed), 6))\n vec = np.zeros(2*len(fixed))\n for i, f in enumerate(fixed):\n label = f[0]\n mat[2*i,0] = self.points[label][0]\n mat[2*i,1] = self.points[label][1] \n mat[2*i+1,2] = self.points[label][0] \n mat[2*i+1,3] = self.points[label][1] \n mat[2*i,4] = 1 \n mat[2*i+1,5] = 1 \n\n vec[2*i] = f[1]\n vec[2*i+1] = f[2]\n\n coeff, resid, rank, s = np.linalg.lstsq(mat, vec)\n a, b, c, d, e, f = tuple(coeff)\n\n labels = self.points.keys()\n for label in labels:\n x = self.points[label][0]\n y = self.points[label][1]\n\n x2 = a * x + b * y + e\n y2 = c * x + d * y + f\n self.points[label][0] = x2\n self.points[label][1] = y2",
"def _apply_affine_arraylike(i, j, k, affine_matrix):\n rotation = affine_matrix[:3, :3]\n translation = affine_matrix[:3, 3]\n res = np.zeros((i.size, 3))\n for idx, row in enumerate(i):\n res[idx,:] = rotation.dot([i[idx], j[idx], k[idx]]) + translation\n return np.transpose(res)",
"def estimate_stage_affine(t0, t1):\n src = np.array([t.tforms[0].translation for t in t0])\n dst = np.array([t.tforms[1].translation for t in t1])\n aff = renderapi.transform.AffineModel()\n aff.estimate(src, dst)\n return aff",
"def calc_affine(df):\n\tx0 = df.columns[0]\n\ty0 = df.index[0]\n\tdx = df.columns[1] - df.columns[0]\n\tdy = df.index[1] - df.index[0]\n\t\n\tt = affine.Affine(dx, 0, x0 , 0, dy ,y0 - dy) \n\t# y0 - dy because anker point is in the south!\n\treturn t",
"def temporal_affine_forward(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n cache = x, w, b, out\n return out, cache",
"def __affine_geo_transformation(x, y, gtr):\n\n # https://gdal.org/user/raster_data_model.html#affine-geotransform\n # Affine transformation rewritten for rasterio:\n gtr_x = gtr[2] + (x + 0.5) * gtr[0] + (y + 0.5) * gtr[1]\n gtr_y = gtr[5] + (x + 0.5) * gtr[3] + (y + 0.5) * gtr[4]\n\n return gtr_x, gtr_y",
"def transform(self, S, typ):\n if typ == 'prep':\n Si = S.get_transform_matrix_inverse()\n self.set_value(_np.dot(Si, self))\n elif typ == 'effect':\n Smx = S.get_transform_matrix()\n self.set_value(_np.dot(_np.transpose(Smx),self)) \n #Evec^T --> ( Evec^T * S )^T\n else:\n raise ValueError(\"Invalid typ argument: %s\" % typ)",
"def affine_forward(x,w,b):\n out=None\n N=x.shape[0]\n x_row=x.reshape(N,-1)\n out=np.dot(x_row,w)+b\n cache=(x,w,b)\n return out,cache",
"def affine_forward(x, w, b):\n ############################################################################\n # TODO: Implement the affine forward pass. Store the result in 'out'. You #\n # will need to reshape the input into rows. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n N = len(x)\n D,M = w.shape\n # reshape get a new x\n new_x = x.reshape(N,D)\n # get the output\n out = np.dot(new_x,w) + np.expand_dims(b,axis=0)\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform x elementwise through an affine function y = exp(-s)(x - t), where s = st[...,0] and t = st[...,1] with s.shape == x.shape == t.shape. This is the inverse of `element_wise_affine` above for the same set of parameters st. The Jacobian for this transformation is the coordinatewise product of the scaling factors es = exp(-s): J = prod(es[...,i], i). | def inverse_element_wise_affine(x, st, compute_jacobian=True):
es = torch.exp(-st[..., 0])
t = st[..., 1]
logj = None
if compute_jacobian:
logj = torch.sum(torch.log(es), dim=-1)
return es * (x - t), logj | [
"def element_wise_affine(x, st, compute_jacobian=True):\n es = torch.exp(st[..., 0])\n t = st[..., 1]\n logj = None\n if compute_jacobian:\n logj = torch.sum(torch.log(es), dim=-1)\n\n return es * x + t, logj",
"def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _",
"def apply_affine(affine, points):\n return [tform_point(affine, p) for p in points]",
"def affine_transform(pt, t):\n new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2]",
"def affine_mult(affine, coordinates):\n return np.dot(coordinates, affine[:3, :3].T) + affine[:3, -1]",
"def temporal_affine(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n return out",
"def _affine(self, x, a, rev=False):\n\n # the entire coupling coefficient tensor is scaled down by a\n # factor of ten for stability and easier initialization.\n a *= 0.1\n ch = x.shape[1]\n\n sub_jac = self.clamp * self.tanh(a[:, :ch])\n if self.GIN:\n sub_jac -= self.mean_keep_dims(sub_jac, self.sum_dims)\n\n output_result = None\n if not rev:\n output_result = (x * self.exp(sub_jac) + a[:, ch:], self.sum(sub_jac, self.sum_dims))\n else:\n output_result = ((x - a[:, ch:]) * ops.exp(-sub_jac), -self.sum(sub_jac, self.sum_dims))\n return output_result",
"def affine_transform(geom, matrix):\n if geom.is_empty:\n return geom\n if len(matrix) == 6:\n ndim = 2\n a, b, d, e, xoff, yoff = matrix\n if geom.has_z:\n ndim = 3\n i = 1.0\n c = f = g = h = zoff = 0.0\n matrix = a, b, c, d, e, f, g, h, i, xoff, yoff, zoff\n elif len(matrix) == 12:\n ndim = 3\n a, b, c, d, e, f, g, h, i, xoff, yoff, zoff = matrix\n if not geom.has_z:\n ndim = 2\n matrix = a, b, d, e, xoff, yoff\n else:\n raise ValueError(\"'matrix' expects either 6 or 12 coefficients\")\n\n def affine_pts(pts):\n \"\"\"Internal function to yield affine transform of coordinate tuples\"\"\"\n if ndim == 2:\n for x, y in pts:\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n yield (xp, yp)\n elif ndim == 3:\n for x, y, z in pts:\n xp = a * x + b * y + c * z + xoff\n yp = d * x + e * y + f * z + yoff\n zp = g * x + h * y + i * z + zoff\n yield (xp, yp, zp)\n\n # Process coordinates from each supported geometry type\n if geom.type in ('Point', 'LineString', 'LinearRing'):\n return type(geom)(list(affine_pts(geom.coords)))\n elif geom.type == 'Polygon':\n ring = geom.exterior\n shell = type(ring)(list(affine_pts(ring.coords)))\n holes = list(geom.interiors)\n for pos, ring in enumerate(holes):\n holes[pos] = type(ring)(list(affine_pts(ring.coords)))\n return type(geom)(shell, holes)\n elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':\n # Recursive call\n # TODO: fix GeometryCollection constructor\n return type(geom)([affine_transform(part, matrix)\n for part in geom.geoms])\n else:\n raise ValueError('Type %r not recognized' % geom.type)",
"def _apply_affine_arraylike(i, j, k, affine_matrix):\n rotation = affine_matrix[:3, :3]\n translation = affine_matrix[:3, 3]\n res = np.zeros((i.size, 3))\n for idx, row in enumerate(i):\n res[idx,:] = rotation.dot([i[idx], j[idx], k[idx]]) + translation\n return np.transpose(res)",
"def fit_affine(src, dst, weg=np.array([])):\n\n # -------------------------------\n # Normalize points\n # -------------------------------\n src, Ts = normalize_points(src)\n dst, Td = normalize_points(dst)\n\n # -------------------------------\n # Set parameters\n # -------------------------------\n # Get number of d-dimensional points\n n = len(src)\n # Check if weights are provided\n if not weg.any():\n weg = np.ones(n)\n\n # -------------------------------\n # Construct matrix A and vector b\n # -------------------------------\n A = np.zeros([2*n,6])\n b = np.zeros([2*n,1])\n W = np.zeros([2*n,2*n])\n for i in xrange(n):\n # Construct matrix A\n # ax = (x, y, 1, 0, 0, 0)\n # ay = (0, 0, 0, x, y, 1)\n A[i*2 ,:] = np.array([ src[i,0], src[i,1], 1, 0, 0, 0 ])\n A[i*2+1,:] = np.array([ 0, 0, 0, src[i,0], src[i,1], 1 ])\n # Construct vector b\n b[i*2 ] = dst[i,0]\n b[i*2+1] = dst[i,1]\n # Construct diagonal weigh matrix\n W[i*2 ,i*2 ] = weg[i]\n W[i*2+1,i*2+1] = weg[i]\n\n # -------------------------------\n # Estimate solution x\n # -------------------------------\n x = np.linalg.solve( (A.T.dot(W)).dot(A) , (A.T.dot(W)).dot(b) )\n T = np.vstack([np.reshape(x,(2,3)) ,[0,0,1]])\n\n # -------------------------------\n # Denormalize transformation\n # -------------------------------\n T, _, _, _ = np.linalg.lstsq(Td,T.dot(Ts))\n\n return T",
"def affine_transform(mtx=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])):\r\n\r\n def tfm(data):\r\n xy = data[:, :2]\r\n return (mtx @ np.vstack((xy.T[:2], np.ones(xy.T.shape[1]))))[:2]\r\n\r\n return tfm",
"def affine_forward(x,w,b):\n out=None\n N=x.shape[0]\n x_row=x.reshape(N,-1)\n out=np.dot(x_row,w)+b\n cache=(x,w,b)\n return out,cache",
"def temporal_affine_forward(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n cache = x, w, b, out\n return out, cache",
"def affine_forward(x, w, b):\n ############################################################################\n # TODO: Implement the affine forward pass. Store the result in 'out'. You #\n # will need to reshape the input into rows. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n N = len(x)\n D,M = w.shape\n # reshape get a new x\n new_x = x.reshape(N,D)\n # get the output\n out = np.dot(new_x,w) + np.expand_dims(b,axis=0)\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out",
"def affine_forward(x, w, b):\n out = None\n \n # reshape the input into (N, d_1 *...* d_k)\n input_shape = x.shape\n prod = 1\n for i in range(1,len(input_shape)):\n prod *= input_shape[i]\n\n a = x.reshape(x.shape[0],prod)\n out = np.dot(a,w) + b\n \n cache = (x, w, b)\n return out, cache",
"def affine_forward(x, w, b):\n num_train = x.shape[0]\n x_flatten = x.reshape((num_train, -1))\n out = np.dot(x_flatten, w) + b\n cache = (x, w, b)\n return out, cache",
"def estimate_stage_affine(t0, t1):\n src = np.array([t.tforms[0].translation for t in t0])\n dst = np.array([t.tforms[1].translation for t in t1])\n aff = renderapi.transform.AffineModel()\n aff.estimate(src, dst)\n return aff",
"def exponential_expansion(x, columns):\n exp = np.exp(x[:, columns])\n return np.c_[x, exp]",
"def geotransform_to_affine(geot):\n c, a, b, f, d, e = list(geot)\n return rio.Affine(a, b, c, d, e, f)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
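The two rows above pair a forward affine coupling map with its inverse. As a quick sanity check (a minimal PyTorch sketch, not part of the dataset; the tensor shapes and the dropped `compute_jacobian` flag are assumptions), applying the inverse after the forward map with the same parameters st should recover the input, and the two log-Jacobians should cancel:

import torch

def element_wise_affine(x, st):
    # y = exp(s) * x + t; the Jacobian is diagonal, so log|J| = sum_i s_i.
    es = torch.exp(st[..., 0])
    return es * x + st[..., 1], torch.sum(torch.log(es), dim=-1)

def inverse_element_wise_affine(y, st):
    # x = exp(-s) * (y - t); log|J| = -sum_i s_i.
    es = torch.exp(-st[..., 0])
    return es * (y - st[..., 1]), torch.sum(torch.log(es), dim=-1)

x = torch.randn(8, 5)        # hypothetical batch of 8 points in 5 dimensions
st = torch.randn(8, 5, 2)    # st[..., 0] = s, st[..., 1] = t

y, logj_fwd = element_wise_affine(x, st)
x_back, logj_inv = inverse_element_wise_affine(y, st)

assert torch.allclose(x_back, x, atol=1e-5)                     # round trip recovers x
assert torch.allclose(logj_fwd + logj_inv,
                      torch.zeros_like(logj_fwd), atol=1e-5)    # log-Jacobians cancel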
Initialize the axis ranges from the provided Plot or renderer. | def initialize_axis_ranges(self, plot, transform=None):
if transform is None:
def transform(x):
return x
elif isinstance(transform, int):
ndigits = transform
def transform(x):
return round(x, ndigits)
# Avoid UI polluting with non-sensical digits
self.x_axis_range_low = transform(plot.x_axis.mapper.range.low)
self.auto_x_axis_range_low = self.x_axis_range_low
self.x_axis_range_high = transform(plot.x_axis.mapper.range.high)
self.auto_x_axis_range_high = self.x_axis_range_high
self.y_axis_range_low = transform(plot.y_axis.mapper.range.low)
self.auto_y_axis_range_low = self.y_axis_range_low
self.y_axis_range_high = transform(plot.y_axis.mapper.range.high)
self.auto_y_axis_range_high = self.y_axis_range_high | [
"def _add_axes_range_sliders(self):\n self.axes_range_sliders = dict()\n\n default_range_x = (self.model.state_variable_range[self.svx][1] -\n self.model.state_variable_range[self.svx][0])\n default_range_y = (self.model.state_variable_range[self.svy][1] -\n self.model.state_variable_range[self.svy][0])\n min_val_x = self.model.state_variable_range[self.svx][0] - 4.0 * default_range_x\n max_val_x = self.model.state_variable_range[self.svx][1] + 4.0 * default_range_x\n min_val_y = self.model.state_variable_range[self.svy][0] - 4.0 * default_range_y\n max_val_y = self.model.state_variable_range[self.svy][1] + 4.0 * default_range_y\n\n sax = self.ipp_fig.add_axes([0.04, 0.835, 0.125, 0.025],\n axisbg=AXCOLOUR)\n sl_x_min = Slider(sax, \"xlo\", min_val_x, max_val_x,\n valinit=self.model.state_variable_range[self.svx][0])\n sl_x_min.on_changed(self._update_range)\n\n sax = self.ipp_fig.add_axes([0.04, 0.8, 0.125, 0.025], axisbg=AXCOLOUR)\n sl_x_max = Slider(sax, \"xhi\", min_val_x, max_val_x, valinit=self.model.state_variable_range[self.svx][1])\n sl_x_max.on_changed(self._update_range)\n\n sax = self.ipp_fig.add_axes([0.04, 0.765, 0.125, 0.025], axisbg=AXCOLOUR)\n sl_y_min = Slider(sax, \"ylo\", min_val_y, max_val_y, valinit=self.model.state_variable_range[self.svy][0])\n sl_y_min.on_changed(self._update_range)\n\n sax = self.ipp_fig.add_axes([0.04, 0.73, 0.125, 0.025], axisbg=AXCOLOUR)\n sl_y_max = Slider(sax, \"yhi\", min_val_y, max_val_y, valinit=self.model.state_variable_range[self.svy][1])\n sl_y_max.on_changed(self._update_range)\n\n self.axes_range_sliders[\"sl_x_min\"] = sl_x_min\n self.axes_range_sliders[\"sl_x_max\"] = sl_x_max\n self.axes_range_sliders[\"sl_y_min\"] = sl_y_min\n self.axes_range_sliders[\"sl_y_max\"] = sl_y_max",
"def resolve_axes(self):\n\t\tself.x_axis = axis_factory(self.x_axis_slug, x=True)\n\t\tself.y_axis = axis_factory(self.y_axis_slug, y=True)",
"def set_axes_ranges(self, ir, ic, ranges):\n\n if self.plot_func == 'plot_heatmap':\n return\n\n # X-axis\n if self.axes.share_x:\n xvals = ['xmin', 'xmax', 'x2min', 'x2max']\n for xval in xvals:\n xx = None\n for irow in range(0, self.nrow):\n for icol in range(0, self.ncol):\n if ranges[irow, icol][xval] is not None:\n if irow == 0 and icol == 0:\n xx = ranges[irow, icol][xval]\n elif 'min' in xval:\n xx = min(xx, ranges[irow, icol][xval])\n else:\n xx = max(xx, ranges[irow, icol][xval])\n\n if xx is not None and xval == 'xmin':\n self.axes.obj[ir, ic].x_range=bm.Range1d(start=xx)\n elif xx is not None and xval == 'x2min':\n self.axes2.obj[ir, ic].x_range=bm.Range1d(start=xx)\n elif xx is not None and xval == 'xmax':\n self.axes.obj[ir, ic].x_range=bm.Range1d(end=xx)\n elif xx is not None and xval == 'x2max':\n self.axes2.obj[ir, ic].x_range=bm.Range1d(end=xx)\n else:\n if ranges[ir, ic]['xmin'] is not None:\n self.axes.obj[ir, ic].x_range=bm.Range1d(start=ranges[ir, ic]['xmin'])\n if ranges[ir, ic]['x2min'] is not None:\n self.axes2.obj[ir, ic].x_range=bm.Range1d(start=ranges[ir, ic]['x2min'])\n if ranges[ir, ic]['xmax'] is not None:\n self.axes.obj[ir, ic].x_range=bm.Range1d(end=ranges[ir, ic]['xmax'])\n if ranges[ir, ic]['x2max'] is not None:\n self.axes2.obj[ir, ic].x_range=bm.Range1d(end=ranges[ir, ic]['x2max'])\n\n # Y-axis\n if self.axes.share_y:\n yvals = ['ymin', 'ymax', 'y2min', 'y2max']\n for yval in yvals:\n yy = None\n for irow in range(0, self.nrow):\n for icol in range(0, self.ncol):\n if ranges[irow, icol][yval] is not None:\n if irow == 0 and icol == 0:\n yy = ranges[irow, icol][yval]\n elif 'min' in yval:\n yy = min(yy, ranges[irow, icol][yval])\n else:\n yy = max(yy, ranges[irow, icol][yval])\n\n if yy is not None and yval == 'ymin':\n self.axes.obj[ir, ic].y_range=bm.Range1d(start=yy)\n elif yy is not None and yval == 'y2min':\n self.axes2.obj[ir, ic].y_range=bm.Range1d(start=yy)\n elif yy is not None and yval == 'ymax':\n self.axes.obj[ir, ic].y_range=bm.Range1d(end=yy)\n elif yy is not None and yval == 'y2max':\n self.axes2.obj[ir, ic].y_range=bm.Range1d(end=yy)\n else:\n if ranges[ir, ic]['ymin'] is not None:\n self.axes.obj[ir, ic].y_range=bm.Range1d(start=ranges[ir, ic]['ymin'])\n if ranges[ir, ic]['y2min'] is not None:\n self.axes2.obj[ir, ic].y_range=bm.Range1d(start=ranges[ir, ic]['y2min'])\n if ranges[ir, ic]['ymax'] is not None:\n self.axes.obj[ir, ic].y_range=bm.Range1d(end=ranges[ir, ic]['ymax'])\n if ranges[ir, ic]['y2max'] is not None:\n self.axes2.obj[ir, ic].y_range=bm.Range1d(end=ranges[ir, ic]['y2max'])",
"def set_axes_ranges(self, ir, ic, ranges):\n\n if self.plot_func == 'plot_heatmap':\n return\n\n # X-axis\n if self.axes.share_x:\n xvals = ['xmin', 'xmax', 'x2min', 'x2max']\n for xval in xvals:\n xx = None\n for irow in range(0, self.nrow):\n for icol in range(0, self.ncol):\n if ranges[irow, icol][xval] is not None:\n if irow == 0 and icol == 0:\n xx = ranges[irow, icol][xval]\n elif 'min' in xval:\n xx = min(xx, ranges[irow, icol][xval])\n else:\n xx = max(xx, ranges[irow, icol][xval])\n\n if xx is not None and xval == 'xmin':\n self.axes.obj[ir, ic].set_xlim(left=xx)\n elif xx is not None and xval == 'x2min':\n self.axes2.obj[ir, ic].set_xlim(left=xx)\n elif xx is not None and xval == 'xmax':\n self.axes.obj[ir, ic].set_xlim(right=xx)\n elif xx is not None and xval == 'x2max':\n self.axes2.obj[ir, ic].set_xlim(right=xx)\n else:\n if ranges[ir, ic]['xmin'] is not None:\n self.axes.obj[ir, ic].set_xlim(left=ranges[ir, ic]['xmin'])\n if ranges[ir, ic]['x2min'] is not None:\n self.axes2.obj[ir, ic].set_xlim(left=ranges[ir, ic]['x2min'])\n if ranges[ir, ic]['xmax'] is not None:\n self.axes.obj[ir, ic].set_xlim(right=ranges[ir, ic]['xmax'])\n if ranges[ir, ic]['x2max'] is not None:\n self.axes2.obj[ir, ic].set_xlim(right=ranges[ir, ic]['x2max'])\n\n # Y-axis\n if self.axes.share_y:\n yvals = ['ymin', 'ymax', 'y2min', 'y2max']\n for yval in yvals:\n yy = None\n for irow in range(0, self.nrow):\n for icol in range(0, self.ncol):\n if ranges[irow, icol][yval] is not None:\n if irow == 0 and icol == 0:\n yy = ranges[irow, icol][yval]\n elif 'min' in yval:\n yy = min(yy, ranges[irow, icol][yval])\n else:\n yy = max(yy, ranges[irow, icol][yval])\n\n if yy is not None and yval == 'ymin':\n self.axes.obj[ir, ic].set_ylim(bottom=yy)\n elif yy is not None and yval == 'y2min':\n self.axes2.obj[ir, ic].set_ylim(bottom=yy)\n elif yy is not None and yval == 'ymax':\n self.axes.obj[ir, ic].set_ylim(top=yy)\n elif yy is not None and yval == 'y2max':\n self.axes2.obj[ir, ic].set_ylim(top=yy)\n else:\n if ranges[ir, ic]['ymin'] is not None:\n self.axes.obj[ir, ic].set_ylim(bottom=ranges[ir, ic]['ymin'])\n if ranges[ir, ic]['y2min'] is not None:\n self.axes2.obj[ir, ic].set_ylim(bottom=ranges[ir, ic]['y2min'])\n if ranges[ir, ic]['ymax'] is not None:\n self.axes.obj[ir, ic].set_ylim(top=ranges[ir, ic]['ymax'])\n if ranges[ir, ic]['y2max'] is not None:\n self.axes2.obj[ir, ic].set_ylim(top=ranges[ir, ic]['y2max'])",
"def _shared_axis_range(self, plots, specs, range_type, axis_type, pos):\n dim_range = None\n categorical = range_type is FactorRange\n for plot in plots:\n if plot is None or specs is None:\n continue\n ax = 'x' if pos == 0 else 'y'\n plot_range = getattr(plot, f'{ax}_range', None)\n axes = getattr(plot, f'{ax}axis', None)\n extra_ranges = getattr(plot, f'extra_{ax}_ranges', {})\n\n if (\n plot_range and plot_range.tags and\n match_dim_specs(plot_range.tags[0], specs) and\n match_ax_type(axes[0], axis_type) and\n not (categorical and not isinstance(dim_range, FactorRange))\n ):\n dim_range = plot_range\n\n if dim_range is not None:\n break\n\n for extra_range in extra_ranges.values():\n if (\n extra_range.tags and match_dim_specs(extra_range.tags[0], specs) and\n match_yaxis_type_to_range(axes, axis_type, extra_range.name) and\n not (categorical and not isinstance(dim_range, FactorRange))\n ):\n dim_range = extra_range\n break\n\n return dim_range",
"def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )",
"def _init_range(self):\n if self.CONTINUOUS:\n self.val_min = min(self.vals)\n self.val_max = max(self.vals)\n buffer_width = (self.val_max - self.val_min) / 10\n\n self.range_min = self.val_min - buffer_width\n self.range_max = self.val_max + buffer_width\n self.subrange_min = self.val_min\n self.subrange_max = self.val_max\n\n else:\n self.range_min = -0.5\n self.range_max = len(self.steps)-0.5\n\n self.vals = [self.indexify(val)\n for val in self.vals]\n \n self.subrange_min = self.range_min + 0.5\n self.subrange_max = self.range_max - 0.5\n\n self.vals_hist = list_to_hist(self.vals)\n self.full_range = (self.range_min, self.range_max)",
"def __init__(self, axes=None, scale_factor=1.0, offset=0.0,\n periodic_boundaries=None):\n self.periodic_boundaries = periodic_boundaries\n self.axes = numpy.array(Space.AXES[axes])\n self.scale_factor = scale_factor\n self.offset = offset",
"def set_range(self, axis: int, range: Sequence[Union[int, float]]):\n if axis < 0:\n axis += self.ndim\n if axis < 0:\n raise ValueError(\n f'axis is negative, expected positive, got {axis}'\n )\n if self.range[axis] != range:\n self._range[axis] = range\n self.events.range(axis=axis)",
"def x_axis_setup(self,ax,min_,max_,interval=1):\n xinterval = ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(interval))\n xlim = ax.set_xlim(min_,max_)\n return xinterval,xlim",
"def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)",
"def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)",
"def update_plots_using_region(self):\n self.frequency_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.resistance_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.temperature_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.pressure_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.humidity_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)",
"def updateRange(self, axes):\n tCELMAexists = any(p.CELMAexists for p in self.getTemporalPlots())\n if tCELMAexists:\n return\n limits = axes.get_xlim()\n\n self.setSliderRange(limits)\n\n self.editCELMAstartTime.setText(\"{:.6f}\".format(limits[0]))\n self.editCELMAendTime.setText(\"{:.6f}\".format(limits[1]))",
"def make_range_frame(self):\n xtrans = transforms.blended_transform_factory(\n self.axes.transData, self.axes.transAxes\n )\n intervalx = interval_as_array(self.axes.dataLim.intervalx)\n\n ytrans = transforms.blended_transform_factory(\n self.axes.transAxes, self.axes.transData\n )\n intervaly = interval_as_array(self.axes.dataLim.intervaly)\n\n xline = LineCollection(\n segments=[[(intervalx[0], 0), (intervalx[1], 0)]],\n linewidths=[self.linewidth],\n colors=[self.color],\n transform=xtrans,\n zorder=10\n )\n yline = LineCollection(\n segments=[[(0, intervaly[0]), (0, intervaly[1])]],\n linewidths=[self.linewidth],\n colors=[self.color],\n transform=ytrans,\n zorder=10\n )\n\n return [xline, yline]",
"def set_default_axes(self, x_min=1, x_max=10, y_min=-24, y_max=24):\n self.x_min, self.x_max = x_min, x_max\n self.y_min, self.y_max = y_min, y_max",
"def render_range_init():\n\n # Adding/Checking ftrack render range attribute\n defaultRenderGlobals = pm.PyNode(\"defaultRenderGlobals\")\n render_range_set = False\n if hasattr(defaultRenderGlobals, \"ftrackRenderRangeSet\"):\n attr = pm.Attribute(\"defaultRenderGlobals.ftrackRenderRangeSet\")\n render_range_set = attr.get()\n else:\n pm.addAttr(\n defaultRenderGlobals,\n longName=\"ftrackRenderRangeSet\",\n defaultValue=True,\n attributeType=\"bool\"\n )\n\n if not render_range_set:\n\n task = ftrack.Task(os.environ[\"FTRACK_TASKID\"])\n\n startFrame = float(task.getParent().get(\"fstart\"))\n endFrame = float(task.getParent().get(\"fend\"))\n\n handles = float(task.getParent().get(\"handles\"))\n\n mc.warning(\n \"Setting render range to {0} {1} \".format(startFrame, endFrame)\n )\n\n # Add handles to start and end frame\n hsf = startFrame - handles\n hef = endFrame + handles\n\n defaultRenderGlobals.animation.set(True)\n defaultRenderGlobals.animationRange.set(1)\n defaultRenderGlobals.startFrame.set(hsf)\n defaultRenderGlobals.endFrame.set(hef)\n\n # Vray specific resolution\n if pm.objExists(\"vraySettings\"):\n vray_settings = pm.PyNode(\"vraySettings\")\n vray_settings.animType.set(1)",
"def set_xylims(self, xyrange,autoscale=True):\n xmin,xmax,ymin,ymax = xyrange\n if autoscale:\n xmin,xmax,ymin,ymax = self.data_range\n \n self.axes.set_xlim((xmin,xmax),emit=True)\n self.axes.set_ylim((ymin,ymax),emit=True)\n self.axes.update_datalim(((xmin,ymin),(xmax,ymax)))\n\n \n if autoscale:\n self.axes.set_xbound(self.axes.xaxis.get_major_locator().view_limits(xmin,xmax))\n self.axes.set_ybound(self.axes.yaxis.get_major_locator().view_limits(ymin,ymax))",
"def py_apply_limits(self, plot):\n if any(x is not None for x in self.x_lim):\n if self.x_lim[0] is not None: # at least left?\n if self.x_lim[1] is not None: # left and right?\n plot.set_xlim(left=self.x_lim[0], right=self.x_lim[1])\n else:\n plot.set_xlim(left=self.x_lim[0])\n else: # just right\n plot.set_xlim(rigt=self.x_lim[1])\n if any(y is not None for y in self.y_lim):\n if self.y_lim[0] is not None: # at least bottom?\n if self.y_lim[1] is not None:\n plot.set_ylim(bottom=self.y_lim[0], top=self.y_lim[1])\n else:\n plot.set_ylim(bottom=self.y_lim[0])\n else:\n plot.set_ylim(top=self.y_lim[1])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
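One detail in the document above is how the optional `transform` argument is normalized before use: `None` becomes the identity, an integer becomes round-to-that-many-digits, and anything else is assumed to be a callable. A minimal standalone sketch of that idiom (the helper name is hypothetical):

def normalize_transform(transform=None):
    # None -> identity; int -> round to that many digits; else assume a callable.
    if transform is None:
        return lambda x: x
    if isinstance(transform, int):
        ndigits = transform
        return lambda x: round(x, ndigits)
    return transform

assert normalize_transform()(3.14159) == 3.14159
assert normalize_transform(2)(3.14159) == 3.14
assert normalize_transform(lambda x: 2 * x)(1.5) == 3.0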
Create an archive from the given tree, upload, and untar it. | def upload_tar_from_git():
require("release", provided_by=[deploy])
tree = prompt("Please enter a branch or SHA1 to deploy", default="master")
local("git archive --format=tar %s | gzip > %s.tar.gz" % (tree, env['release']))
sudo("mkdir %(path)s/releases/%(release)s" % env)
put("%(release)s.tar.gz" % env, "%(path)s/packages/" % env, use_sudo=True)
sudo("cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz" % env)
local("rm %(release)s.tar.gz" % env) | [
"def git_archive_and_upload_tar():\n current_branch = str(subprocess.Popen('git branch | grep \"*\" | sed \"s/* //\"', \\\n shell=True,\\\n stdin=subprocess.PIPE, \\\n stdout=subprocess.PIPE).communicate()[0]).rstrip()\n env.git_branch = current_branch\n local('git archive --format=tar %(git_branch)s > %(release)s.tar' % env)\n local('touch `git describe HEAD`.tag')\n local('tar rvf %(release)s.tar `git describe HEAD`.tag; rm `git describe HEAD`.tag' % env)\n local('gzip %(release)s.tar' % env)\n run('; mkdir -p %(path)s/releases/%(release)s' % env)\n run('; mkdir -p %(path)s/packages/' % env)\n rsync_project('%(path)s/packages/' % env, '%(release)s.tar.gz' % env, extra_opts='-avz --progress')\n run('cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz' % env)\n local('rm %(release)s.tar.gz' % env)",
"def create_tar(self):\n with tarfile.open(self.tgzfile, \"w:gz\") as tar_handle:\n for root, _, files in os.walk(self.dirname):\n for file in files:\n tar_handle.add(os.path.join(root, file))",
"def pack(obj):\n tar = tarfile.open(obj.path, \"w\")\n for path in obj.content:\n # TODO: check if file or directory\n tar.add(path)\n tar.close()\n obj.archive = obj.path\n obj.packed = True",
"def make_tar(args):\n TAR_FILE = args['TAR_FILE']\n MONGO_DUMP = args['MONGO_DUMP']\n tar = tarfile.open(TAR_FILE, \"w:gz\")\n\n for root, dir, files in os.walk(MONGO_DUMP):\n for file in files:\n fullpath = os.path.join(root, file)\n tar.add(fullpath)\n tar.close()",
"def untar(conn, tarball, path):\n conn.run(f\"tar xf {tarball} -C {path}\")",
"def create_tree_archive(destination):\n cwd = os.getcwd()\n os.chdir(destination)\n cmd = (\"rm -f Rfam.seed_tree.tar.gz && \"\n \"rm -Rf Rfam.seed_tree && \"\n \"mkdir Rfam.seed_tree && \"\n \"mv RF0*.seed_tree Rfam.seed_tree && \"\n \"tar -cf Rfam.seed_tree.tar.gz Rfam.seed_tree\")\n status = os.system(cmd.format(destination))\n if status:\n raise Exception('There was a problem generating Rfam.seed_tree.gz in {}'.format(destination))\n os.chdir(cwd)",
"def upload():\n\n # Create a tempfile to write the data to. delete=False because we will\n # close after writing, before processing, and this would normally cause a\n # tempfile to disappear.\n file = tempfile.NamedTemporaryFile(prefix='perun_upload', suffix='.tar.gz', delete=False)\n\n # store uploaded data\n file.write(request.get_data())\n file.close()\n\n # execute task\n result = executor.submit(process_tarball, file.name, read_only=app.config.get('KEYSTONE_READ_ONLY', False),\n target_domain_name=app.config.get('TARGET_DOMAIN_NAME', 'elixir'),\n default_role=app.config.get('DEFAULT_ROLE', 'user'),\n nested=app.config.get('NESTED', False),\n cloud_admin=app.config.get('CLOUD_ADMIN', True),\n base_dir=app.config.get('BASE_DIR', tempfile.mkdtemp()),\n support_quota=app.config.get('SUPPORT_QUOTA', False),\n cleanup=app.config.get('CLEANUP', False))\n\n # if task fails with an exception, the thread pool catches the exception,\n # stores it, then re-raises it when we call the result() function.\n try:\n result.result()\n except Exception:\n traceback.print_exc()\n\n if app.config.get('CLEANUP', False):\n os.unlink(file)\n\n return \"\"",
"def untar(tar_path, cleanup=False):\n tfile = tarfile.open(tar_path, 'r')\n tfile.extractall(os.path.dirname(tar_path))\n tfile.close()\n if cleanup:\n os.remove(tar_path)",
"def __git_archive():\n project = env.get('project', 'project')\n local(\"git archive HEAD -o %s.tar; git submodule foreach 'git archive --prefix ${path}/ HEAD -o ../temp.tar; gnutar -Af ../%s.tar ../temp.tar; rm ../temp.tar'; gzip -f %s.tar\" % (project, project, project))",
"def write_tar(tarname, items):\n tardir = tarname + '.d'\n rootdir = os.path.join(tardir, 'ImportRoot')\n os.makedirs(rootdir) # fail if exists, avoid accidental overwrite\n files = [] # list of metadata dicts for files in archive\n outputs = [] # list of (filesystem-path, archive-name) pairs\n\n for item_i, item in enumerate(items):\n attrs = item.setdefault('attrs', [])\n _ext = item.get('_ext', {})\n for tag in _ext.get('tags', []):\n attrs.append(\n attr_template({\"type\": \"label\", \"name\": \"tag\", \"value\": tag})\n )\n files.append(node_template(item))\n if '_ext' in files[-1]:\n del files[-1]['_ext']\n if item['_ext'].get('body'):\n datafile = \"md%07d.md\" % item_i\n files[-1]['dataFileName'] = datafile\n datapath = os.path.join(rootdir, datafile)\n outputs.append((datapath, datafile))\n with open(datapath, 'w') as out:\n out.write(item['_ext']['body'])\n\n order_nodes_attrs(files)\n\n # FIXME: could scan for clones by duplicate IDs here\n\n root = node_template(\n {\n \"title\": \"Import root\",\n \"dirFileName\": \"ImportRoot\",\n \"children\": files,\n }\n )\n data = {\"formatVersion\": 1, \"appVersion\": \"0.34.3\", \"files\": [root]}\n metafile = os.path.join(tardir, '!!!meta.json')\n json.dump(data, open(metafile, 'w'), indent='\\t')\n tar = tarfile.open(tarname, mode='w')\n tar.add(metafile, '!!!meta.json')\n tar.add(rootdir, \"ImportRoot/\") # add the directory by itself\n for path, name in outputs:\n # add the files\n tar.add(path, \"ImportRoot/%s\" % name)\n tar.close()",
"def untar(archive):\n log.info('Unpacking archive \"%s\".' % archive)\n tar = module.params['tar']\n tar_extra_options = shlex.split(module.params['tar_extra_options'])\n if not tar:\n tar = module.get_bin_path('tar', required=True)\n if archive.endswith('.gz'):\n uncompress = 'z'\n elif archive.endswith('.bz2'):\n uncompress = 'j'\n else:\n raise ValueError('Unsupported compression type: %s' % archive)\n options = ''.join(['x', uncompress, 'f'])\n args = [tar, options] + tar_extra_options + [archive]\n rc, out, err = module.run_command(args)\n log.info('untar: rc=%d out=%s err=%s', rc, out, err)\n if rc != 0:\n raise ValueError('tar command failed: %d' % rc)",
"def do_tree(ihelper, root_node, options):\n root_path = os.path.normpath(root_node) # remove any trailing slashes from given root path\n\n # get a list of files to be uploaded\n source_paths = get_source_paths(root_path, options)\n\n # remove the root path prefix from the upload files\n suffix_paths = make_suffix_paths(root_path, source_paths, options)\n\n # make a list of directories to be created in iRods and create them\n dir_paths = make_dir_paths(suffix_paths, options)\n for adir in dir_paths:\n ihelper.mkdir(adir, absolute=True) # make corresponding iRods directory\n\n # make a list of user-home-relative target file paths\n target_paths = make_target_paths(suffix_paths, options)\n\n # pair up the local source file paths and the iRods target file paths, then upload the files\n return [ do_file(ihelper, pair[0], pair[1], options) for pair in zip(source_paths, target_paths) ]",
"def make_archive(self, template_source, archive_file):\n # Strip GitHub's ?raw=true\n archive_file = archive_file.replace('.tar.gz?raw=true', '.tar.gz')\n archive_file = re.sub('.tar.gz', '', archive_file)\n with tarfile.open(archive_file + '.tar.gz', mode='w:gz') as archive:\n archive.add(template_source, recursive=True)\n print('Template archive created.')",
"def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )",
"def untar(path, fname, deleteTar=True):\n print(\"unpacking \" + fname)\n fullpath = os.path.join(path, fname)\n shutil.unpack_archive(fullpath, path)\n if deleteTar:\n os.remove(fullpath)",
"def pack(archive: Union[Path, str],\n paths: List[Union[Path, str]],\n cwd: Optional[Path] = None,\n exclude: Optional[List[Union[Path, str]]] = ()):\n archive = Path(archive)\n if cwd is None:\n cwd = Path.cwd()\n if archive.suffix == '.xz':\n archive = archive.with_suffix('')\n\n # Make sure all the paths have sane permissions.\n def walk(path):\n if path.is_symlink():\n return\n elif path.is_dir():\n # All dirs should be 755.\n mode = path.stat().st_mode & 0o777\n if mode != 0o755:\n path.chmod(0o755)\n\n for subpath in path.glob('*'):\n walk(subpath)\n elif path.is_file():\n # All scripts should be 755 while other files should be 644.\n mode = path.stat().st_mode & 0o777\n if mode in (0o755, 0o644):\n return\n if mode & 0o111:\n path.chmod(0o755)\n else:\n path.chmod(0o644)\n else:\n raise ValueError(f'{path}: unknown file type')\n\n logging.info('Forcing sane permissions on inputs')\n for path in paths:\n walk(cwd / path)\n\n logging.info('Creating %s tarball', archive.name)\n # We use relpath here to help out tar on platforms where it doesn't like\n # paths with colons in them (e.g. Windows). We have to construct the full\n # before running through relpath as relative archives will implicitly be\n # checked against os.getcwd rather than the explicit cwd.\n tar = os.path.relpath(cwd / archive, cwd)\n run(['tar', '--owner=0', '--group=0', '-cf', tar] +\n [f'--exclude={x}' for x in exclude] + ['--'] + paths, cwd=cwd)\n\n logging.info('Compressing tarball')\n run(['xz', '-f', '-T0', '-9', tar], cwd=cwd)",
"def create_tarball(self):\n\n tarball_filename = self.output_files['source.tar']\n targz_filename = self.output_files['source.tar.gz']\n\n print(f'Creating {tarball_filename}')\n product_dir = pathlib.Path(self.product_path)\n\n with pushd(product_dir):\n with tarfile.open(tarball_filename, 'w') as tar_fh:\n for root, dirs, files in os.walk('.'):\n for name in files:\n tar_fh.add(os.path.join(root, name)[2:])\n for name in dirs:\n if name == '.repo' or name == '.git':\n dirs.remove(name)\n else:\n tar_fh.add(os.path.join(root, name)[2:],\n recursive=False)\n\n if self.manifest_config.get('keep_git', False):\n print(f'Adding Git files to {tarball_filename}')\n # When keeping git files, need to dereference symlinks\n # so that the resulting .git directories work on Windows.\n # Because of this, we don't save the .repo directory\n # also, as that would double the size of the tarball\n # since mostly .repo just contains git dirs.\n with tarfile.open(tarball_filename, \"a\",\n dereference=True) as tar:\n for root, dirs, files in os.walk('.', followlinks=True):\n for name in dirs:\n if name == '.repo':\n dirs.remove(name)\n elif name == '.git':\n tar.add(os.path.join(root, name)[2:],\n recursive=False)\n if '/.git' in root:\n for name in files:\n # Git (or repo) sometimes creates broken\n # symlinks, like \"shallow\", and Python's\n # tarfile module chokes on those\n if os.path.exists(os.path.join(root, name)):\n tar.add(os.path.join(root, name)[2:],\n recursive=False)\n\n print(f'Compressing {tarball_filename}')\n\n with open(tarball_filename, 'rb') as f_in, \\\n gzip.open(targz_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n os.unlink(tarball_filename)",
"def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())",
"def unpack(c):\n c.run(\"tar -xzf raw_data.tar.gz --directory data\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
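The document above drives the archive/upload/unpack flow through Fabric (`local`, `put`, `sudo`). A local-only sketch of the same pattern in plain Python, with the remote upload step omitted and all names and paths hypothetical:

import os
import subprocess
import tarfile

def archive_and_extract(tree="master", release="r1", dest="releases"):
    # Write `git archive <tree>` to a gzipped tarball, mirroring the
    # `git archive --format=tar ... | gzip` step above.
    tarball = f"{release}.tar.gz"
    with open(tarball, "wb") as out:
        subprocess.run(f"git archive --format=tar {tree} | gzip",
                       shell=True, stdout=out, check=True)
    # Unpack into releases/<release>/, mirroring the remote `tar zxf` step.
    target = os.path.join(dest, release)
    os.makedirs(target, exist_ok=True)
    with tarfile.open(tarball, "r:gz") as tar:
        tar.extractall(path=target)
    os.remove(tarball)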
Symlink to the new current release. | def symlink_current_release():
require("release", provided_by=[deploy])
with cd("%(path)s/releases" % env):
sudo("ln -s %(release)s current_tmp && mv -Tf current_tmp current" % env) | [
"def link(self, newpath):\n os.link(self, newpath)",
"def symlink_single_version(version):\r\n default_version = version.project.default_version\r\n log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=default_version, msg=\"Symlinking single_version\"))\r\n\r\n # The single_version directory\r\n symlink = version.project.single_version_symlink_path()\r\n run_on_app_servers('mkdir -p %s' % '/'.join(symlink.split('/')[:-1]))\r\n\r\n # Where the actual docs live\r\n docs_dir = os.path.join(settings.DOCROOT, version.project.slug, 'rtd-builds', default_version)\r\n run_on_app_servers('ln -nsf %s %s' % (docs_dir, symlink))",
"def hardlink(self, newpath):\n os.link(unicode(self), unicode(newpath))",
"def symlink_repo(outdir: str, old_name: str, new_name: str):\n old_path = os.path.join(outdir, old_name)\n dirname, basename = os.path.split(old_path)\n if not basename:\n old_path = dirname\n dirname, basename = os.path.split(old_path)\n print(dirname, basename)\n makedirs(os.path.join(dirname, '')) # End in / to make full path\n\n new_path = os.path.join(outdir, new_name, '')\n rel_path = os.path.relpath(new_path, dirname)\n os.symlink(rel_path, old_path)",
"def symlink(self, newlink):\n os.symlink(self, newlink)",
"def make_active(revision):\n run('ln -sfn {base}/{revision}/ {base}/newest'.format(base=BASE_PATH,\n revision=revision))",
"def link(self, version=None):\n\n if version is None:\n yaml_file = os.path.join(self.root_directory, '.clowder', 'clowder.yaml')\n path_output = fmt.path('.clowder/clowder.yaml')\n else:\n relative_path = os.path.join('.clowder', 'versions', version, 'clowder.yaml')\n path_output = fmt.path(relative_path)\n yaml_file = os.path.join(self.root_directory, relative_path)\n\n if not os.path.isfile(yaml_file):\n print(path_output + \" doesn't seem to exist\\n\")\n sys.exit(1)\n\n yaml_symlink = os.path.join(self.root_directory, 'clowder.yaml')\n print(' - Symlink ' + path_output)\n force_symlink(yaml_file, yaml_symlink)",
"def mklinkto(self, oldname):\n error.checked_call(os.link, str(oldname), str(self))",
"def symlink(self, path, target, *args, **kwargs): # pragma: no cover",
"def single_version_symlink_path(self):\r\n return os.path.join(self.doc_path, 'single_version')",
"def create_latest_symlink(destination_path):\n symlink_path = os.path.join(os.path.dirname(destination_path), 'latest')\n os.symlink(destination_path, symlink_path)",
"def install_links(self):\n # Make build log visible - it contains OpenFOAM-specific information\n with working_dir(self.projectdir):\n os.symlink(\n join_path(os.path.relpath(self.install_log_path)),\n join_path(\"log.\" + str(self.foam_arch)),\n )\n\n if not self.config[\"link\"]:\n return\n\n # ln -s platforms/linux64GccXXX/lib lib\n with working_dir(self.projectdir):\n if os.path.isdir(self.archlib):\n os.symlink(self.archlib, \"lib\")\n\n # (cd bin && ln -s ../platforms/linux64GccXXX/bin/* .)\n with working_dir(join_path(self.projectdir, \"bin\")):\n for f in [\n f for f in glob.glob(join_path(\"..\", self.archbin, \"*\")) if os.path.isfile(f)\n ]:\n os.symlink(f, os.path.basename(f))",
"def link(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n full_destination_path = os.path.join(\n os.path.expandvars(self.path_destination), self.name\n )\n\n try:\n if self.sudo:\n spawn.process(\n f'ln -sfv \"{full_source_path}\" \"{full_destination_path}\"',\n sudo=True,\n )\n else:\n os.symlink(full_source_path, full_destination_path)\n except FileExistsError:\n message.error(\n \"Can't symlink, file already exists at destination. Attempting fix.\"\n )\n os.remove(full_destination_path)\n message.info(f\"Removed: '{full_destination_path}'\")\n os.symlink(full_source_path, full_destination_path)\n finally:\n message.info(\n f\"Symlink created: '{full_source_path}' <--> '{full_destination_path}'\"\n )\n else:\n message.error(\n f\"'{self.name}' has no source from which to create a link from.\"\n )",
"def symlink(self, dst):\r\n raise NotImplementedError()",
"def do_link(self):\n source = self._media.absolute_path()\n target = self._formatter.format(self._movie, self._media)\n logger.info(f\"Creating hard link from {source} to {target}\")\n target_dirname = os.path.dirname(target)\n os.makedirs(target_dirname, exist_ok=True)\n os.link(source, target)",
"def writelink(self, src):\n os.symlink(unicode(src), unicode(self))",
"def _makelink(self, name, target):\n name = os.path.join(self.working_dir, name)\n target = os.path.join(self.working_dir, target)\n if not os.path.isdir(os.path.dirname(name)):\n os.makedirs(os.path.dirname(name))\n os.symlink(target,name)",
"def _activate_new_source(self, source_dir, active_version_symlinks):\n # Switch the symlink and use our new project\n logger.info(\"Activating new source via symlinks\")\n for symlink in active_version_symlinks:\n logger.info(\"Symlinking %s\", symlink)\n symlink_dir, _ = os.path.split(symlink)\n with hide(*fab_output_hides):\n sudo('mkdir -p %s' % symlink_dir)\n sudo('rm -f %s' % symlink)\n sudo('ln -s %s %s' % (source_dir, symlink))\n\n # Clean out any stale pycs that may have been generated by queued\n # up processes that were using the old symlink\n with hide(*fab_output_hides):\n sudo('find %s -name \"*.pyc\" -delete' % source_dir)",
"def update_version(app, version):\r\n symlink = os.path.join(env.app_root, app)\r\n version_path = os.path.join(env.app_root, app+'-versions', version)\r\n sudo('ln -sfn \"%s\" \"%s\"' % (version_path, symlink))\r\n\r\n # Restart with upstart\r\n if env.apps[app].get('init'):\r\n restart(env.apps[app]['init'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
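The `ln -s ... current_tmp && mv -Tf current_tmp current` pair above is the classic atomic-swap trick: the link is built under a temporary name and then renamed over `current` in a single step, so the pointer is never missing or half-written during a deploy. A rough Python equivalent (paths are hypothetical):

import os

def point_current_at(release, releases_dir="releases"):
    tmp = os.path.join(releases_dir, "current_tmp")
    cur = os.path.join(releases_dir, "current")
    if os.path.lexists(tmp):
        os.remove(tmp)
    os.symlink(release, tmp)   # link target is relative to releases/
    os.replace(tmp, cur)       # atomic rename on POSIX, like mv -Tf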
Remove older releases, keeping the last `keep_num` intact. | def cleanup(keep_num=5):
keep_num = int(keep_num)
assert keep_num > 0, "[ERROR] keep_num must be > 0; refusing to proceed."
with cd("%(path)s/packages" % env):
package_files = sorted(run("ls -1").split())
package_files = [_.replace(".tar.gz", "") for _ in package_files]
with cd("%(path)s/releases" % env):
release_files = sorted(run("ls -1").split())
release_files.remove('current')
diff = set(package_files).symmetric_difference(set(release_files))
if diff:
raise Exception("[ERROR]: Package and release directories are out of sync;"
" refusing to proceed. Please fix this difference manually: %s" % diff)
package_files = package_files[:-keep_num]
release_files = release_files[:-keep_num]
with cd("%(path)s/packages" % env):
[sudo("rm %s.tar.gz" % _) for _ in package_files]
with cd("%(path)s/releases" % env):
[sudo("rm -r %s" % _) for _ in release_files] | [
"def delete_older_model_versions(self, model_name, n_versions_to_keep):\n\n def _get_use_time(version):\n use_time = version.get('lastUseTime') or version.get('createTime')\n return time.strptime(use_time, \"%Y-%m-%dT%H:%M:%SZ\")\n\n versions = self.__get_model_versions_with_metadata(model_name)\n versions.sort(key=_get_use_time, reverse=True)\n versions_name = [x['name'].split(\"/\")[-1] for x in versions]\n remove_versions = versions_name[n_versions_to_keep:]\n\n for version in remove_versions:\n self.delete_model_version(model_name, version)",
"def prune_versions(pipeline_stage, item_name, pipeline_options, keep_number=10):\n \n require_type(pipeline_stage, PLI_FILE_TYPE)\n raise Exception(\"Pruning not yet implemented\")",
"def _deleteOldVersionsByAge(self, model, max_age, number_to_keep=None):\r\n adapter = getVersionManagementAdapter(model)\r\n\r\n version_ids = self._getOldVersionIds(adapter)\r\n if number_to_keep is not None:\r\n if len(version_ids) < number_to_keep:\r\n return\r\n version_ids = version_ids[:-number_to_keep]\r\n\r\n then = datetime.now() - timedelta(days=max_age)\r\n oldest_time = DateTime(then.isoformat())\r\n\r\n index = None\r\n for id in version_ids:\r\n if adapter.getVersionModificationTime(id) >= oldest_time:\r\n break\r\n index = version_ids.index(id)\r\n\r\n delete_ids = []\r\n if index is not None:\r\n delete_ids = version_ids[:index]\r\n self._removed += len(delete_ids)\r\n model.manage_delObjects(delete_ids)",
"def do_clean(number=0):\n res = run(\"ls /data/web_static/releases\")\n\n number = int(number)\n list_names = str(res).split()\n date_list = []\n delete_list = []\n patt1 = re.compile(r'web_static_\\d{14}')\n for name in list_names:\n if re.fullmatch(patt1, name):\n date_list.append(int(name[11:]))\n else:\n delete_list.append(name)\n\n for elem in delete_list:\n run(\"rm -Rf /data/web_static/releases/\" + elem)\n\n if number == 0:\n list_names.remove(\"web_static_\" + str(max(date_list)))\n else:\n for _ in range(0, number):\n newer = max(date_list)\n list_names.remove(\"web_static_\" + str(newer))\n date_list.remove(newer)\n\n for names in list_names:\n run(\"rm -Rf /data/web_static/releases/\" + names)\n\n res = local(\"ls versions\")\n version_names = str(res).split()\n delete_list = []\n patt2 = re.compile(r'web_static_\\d{14}\\.tgz')\n for name in version_names:\n if re.fullmatch(patt2, name) is None:\n delete_list.append(name)\n for names in delete_list:\n local(\"rm -Rf versions/\" + names)\n for names in list_names:\n local(\"rm -Rf versions/\" + names + \".tgz\")",
"def prune(c):\n with conn.cd(utils.join(SALT_DEPLOY_PATH, utils.DEPLOY_RELEASES_DIR)):\n releases = [\n d.replace(\"./\", \"\").strip()\n for d in conn.run(\"find . -maxdepth 1 -mindepth 1 -type d\", pty=True)\n .stdout.strip()\n .split(\"\\n\")\n ]\n releases.sort()\n\n diff = len(releases) - int(SALT_KEEP_RELEASES)\n print(\n f\"Found {len(releases)} current releases; set to keep {SALT_KEEP_RELEASES}\"\n )\n if diff > 0:\n to_delete = releases[:diff]\n print(f\"Cleaning up {len(to_delete)} old release(s)\")\n conn.run(f\"rm -rf {' '.join(to_delete)}\")\n else:\n print(\"Nothing to do\")",
"def max_unused_versions_to_keep(self) -> int:\n return pulumi.get(self, \"max_unused_versions_to_keep\")",
"async def _remove_old_program_state(self) -> None:\n if self._keep_total <= 0:\n return\n versions = await self.get_versions()\n if versions is not None and len(versions) > self._keep_total:\n start = 1 if self._keep_first else 0\n stop = start - self._keep_total\n await asyncio.gather(*[self._remove(v) for v in versions[start:stop]])",
"def mark_newest_images(imageList, numberToRetain):\n sortedList = sorted(imageList, \n reverse=True, \n key=lambda image: image.created)\n for x in range(0, numberToRetain):\n sortedList[x].delete = False\n return sortedList",
"def do_clean(number=0):\n # Get all files from versions folder\n\n files = glob.glob('./versions/*')\n server_dir = '/data/web_static/releases'\n\n files.sort(key=os.path.getctime)\n count = 1\n for f in files:\n if count > int(number):\n f_name = f.split('/')[2].split('.')[0]\n local(\"rm -rf versions/{}.tgz\".format(f_name))\n run(\"sudo rm -rf \" + server_dir + \"/{}\".format(f_name))\n else:\n count += 1",
"def remove(min_packages_to_keep: int, older_than: date, channel, package_name):\n files = sorted(get_file_list(channel, package_name), reverse=True)\n print(f'Found {len(files)} packages for {package_name}')\n # Keep the most recent N\n print(f'Retaining most recent {min_packages_to_keep} packages for {package_name}')\n files = files[min_packages_to_keep:]\n # Remove packages older than the date\n files = versions_older_than(older_than, files)\n print(f'Removing {package_name} ({len(files)}): {[x[2] for x in files]}')\n if files:\n count = 0\n for files_chunk in split_every(50, files):\n specs = (parse_specs(f'{channel}/{package_name}/{f[1]}/{f[2]}')\n for f in files_chunk)\n args = Namespace(specs=specs, token=TOKEN, site='', force=True)\n remove_main(args)\n count += len(files_chunk)\n print(f'Removed {count}/{len(files)} {package_name} packages...')",
"def trim_lesser_versions(version):\n possible_versions[:] = [x for x in possible_versions if x >= version]",
"def _prune_older_binary_releases(packages):\n releases = {}\n\n for pkg in packages:\n key = (pkg.full_name, pkg.version, pkg.pyver, pkg.osarch)\n if key in releases:\n prevrel = releases[key]\n if pkg.pkg_version == prevrel.pkg_version:\n raise IOError('duplicate packages in repository: %s; %s' % \\\n (prevrel.relpath, pkg.relpath))\n elif pkg.pkg_version > prevrel.pkg_version:\n releases[key] = pkg\n else:\n releases[key] = pkg\n\n return releases.values()",
"def tag_release(self, rel_num):\n newest_files = self.getFiles(newest_version=True)\n\n for f in newest_files:\n self.addRelease(f, rel_num, commit=False)\n self.commitDB()\n return len(newest_files)",
"def prune_jars(self, jar_name, maxx):\n jars = self.list_jars_named(jar_name)\n srt = sorted(jars, key = lambda j : j['date'], reverse = True)\n delete = srt[maxx:]\n for d in delete:\n print \"Deleting: %s %d\" % (d['name'], d['date'])\n self.delete_jar(d['id'])\n return delete",
"def Remove(self, version_number):\n self.dict.pop(str(version_number))",
"def cleanup_old(self): # noqa\n latest_version = 'v{}'.format(self.version)\n self.app.log(\n 'Cleaning up RSs for releases older than {} (latest)'.format(latest_version),\n level=logging.DEBUG\n )\n\n # Cleanup controllers\n labels = {'heritage': 'drycc'}\n replica_sets_removal = []\n replica_sets = self._scheduler.rs.get(self.app.id, labels=labels).json()['items']\n if not replica_sets:\n replica_sets = []\n for replica_set in replica_sets:\n current_version = replica_set['metadata']['labels']['version']\n # skip the latest release\n if current_version == latest_version:\n continue\n\n # aggregate versions together to removal all at once\n if current_version not in replica_sets_removal:\n replica_sets_removal.append(current_version)\n\n if replica_sets_removal:\n self.app.log(\n 'Found the following versions to cleanup: {}'.format(', '.join(replica_sets_removal)), # noqa\n level=logging.DEBUG\n )\n\n # this is RC related\n for version in replica_sets_removal:\n self._delete_release_in_scheduler(self.app.id, version)\n\n # handle Deployments specific cleanups\n self._cleanup_deployment_secrets_and_configs(self.app.id)\n\n # Remove stray pods\n labels = {'heritage': 'drycc'}\n pods = self._scheduler.pod.get(self.app.id, labels=labels).json()['items']\n if not pods:\n pods = []\n for pod in pods:\n if self._scheduler.pod.deleted(pod):\n continue\n\n current_version = pod['metadata']['labels']['version']\n # skip the latest release\n if current_version == latest_version:\n continue\n\n try:\n self._scheduler.pod.delete(self.app.id, pod['metadata']['name'])\n except KubeHTTPException as e:\n # Sometimes k8s will manage to remove the pod from under us\n if e.response.status_code == 404:\n continue",
"def Remove(self, version_number):\r\n self.dict.pop(str(version_number))",
"def remove_backups(self, dbg=False):\n if not self.is_history():\n return None\n if dbg:\n print(\"Scrubbing history...\")\n to_remove = []\n for i in range(len(self.data('model_number')) - 1):\n smallest_future = np.min(self.data('model_number')[i + 1:])\n if self.data('model_number')[i] >= smallest_future:\n to_remove.append(i)\n if len(to_remove) == 0:\n if dbg:\n print(\"Already clean!\")\n return None\n if dbg:\n print(\"Removing {} lines.\".format(len(to_remove)))\n self.bulk_data = np.delete(self.bulk_data, to_remove)",
"def prune(self, keep=1):\n\n # make sure we are up to date\n self.load()\n\n if keep < 1:\n raise PruneToZeroError(len(self.snapshots))\n\n if len(self.snapshots) <= keep:\n return\n\n # delete the meatdata of old snapshots\n old, self.snapshots = self.snapshots[:-keep], self.snapshots[-keep:]\n\n for snapshot in old:\n snapshot.delete()\n\n # delete the prefixes we don't recognise\n known = set()\n\n for snapshot in self.snapshots:\n known.add(snapshot.prefix)\n known.update(d for d in snapshot.meta.get('files', ()))\n\n for prefix in self.envoy.prefixes():\n if prefix not in known:\n self.envoy.delete(prefix)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Give each Node uniform splits of data. Nodes will have the same amount of data. | def uniform_split(self, nr_agents):
indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist()
self.samples = self.partition(self.samples, indices, nr_agents)
self.labels = self.partition(self.labels, indices, nr_agents) | [
"def _split_node(self, split_ratio: float):\n if (self.num_nodes < len(split_ratio)):\n raise ValueError('in _split_node num of nodes are smaller than'\n 'number of splitted parts')\n\n split_graphs = []\n shuffled_node_indices = torch.randperm(self.num_nodes)\n split_offset = 0\n\n # perform `secure split` s.t. guarantees all splitted subgraph\n # contains at least one node.\n for i, split_ratio_i in enumerate(split_ratio):\n if i != len(split_ratio) - 1:\n num_split_i = \\\n 1 + int(split_ratio_i * (self.num_nodes - len(split_ratio)))\n nodes_split_i = shuffled_node_indices[\n split_offset: split_offset + num_split_i]\n split_offset += num_split_i\n else:\n nodes_split_i = shuffled_node_indices[split_offset:]\n # shallow copy all attributes\n graph_new = copy.copy(self)\n graph_new.node_label_index = nodes_split_i\n split_graphs.append(graph_new)\n return split_graphs",
"def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r",
"def split_network(self):\n disconnect_nodes(self.nodes[1], 2)\n disconnect_nodes(self.nodes[2], 1)\n self.sync_all([self.nodes[:2], self.nodes[2:]])",
"def _split_node(\n self,\n split_types: List[str],\n split_ratio: float,\n shuffle: bool = True\n ):\n if isinstance(split_types, list):\n for split_type in split_types:\n if split_type not in self.node_types:\n raise TypeError(\n \"all split_type in split_types need to be in \"\n f\"{self.node_types}, however split type: \"\n \"{split_type} is in split_types.\"\n )\n elif split_types is None:\n split_types = self.node_types\n else:\n if split_types not in self.node_types:\n raise TypeError(\n f\"split_types need to be in {self.node_types}, \"\n f\"however split_types is: {split_types}.\"\n )\n else:\n split_types = [split_types]\n\n for split_type, num_node_type in self.num_nodes(split_types).items():\n if num_node_type < len(split_ratio):\n raise ValueError(\n f\"In _split_node num of nodes of node_type: {split_type} \"\n \"are smaller than number of splitted parts.\"\n )\n split_graphs = []\n for _ in range(len(split_ratio)):\n graph_new = copy.copy(self)\n graph_new.node_label_index = {}\n graph_new.node_label = {}\n split_graphs.append(graph_new)\n\n for split_type in self.node_types:\n if split_type in split_types:\n split_type_nodes_length = self.num_nodes(split_type)\n if shuffle:\n split_type_node = self.node_label_index[split_type][\n torch.randperm(split_type_nodes_length)\n ]\n else:\n split_type_node = self.node_label_index[split_type]\n\n # used to indicate whether default splitting results in\n # empty splitted graphs\n split_empty_flag = False\n nodes_split_list = []\n\n # perform `default split`\n split_offset = 0\n for i, split_ratio_i in enumerate(split_ratio):\n if i != len(split_ratio) - 1:\n num_split_i = int(\n split_ratio_i * split_type_nodes_length\n )\n nodes_split_i = (\n split_type_node[\n split_offset:split_offset + num_split_i\n ]\n )\n split_offset += num_split_i\n else:\n nodes_split_i = split_type_node[split_offset:]\n if nodes_split_i.numel() == 0:\n split_empty_flag = True\n split_offset = 0\n nodes_split_list = []\n break\n nodes_split_list.append(nodes_split_i)\n\n if split_empty_flag:\n for i, split_ratio_i in enumerate(split_ratio):\n # perform `secure split` s.t. guarantees all\n # splitted subgraph of a split type contains at\n # least one node.\n if i != len(split_ratio) - 1:\n num_split_i = (\n 1 +\n int(\n split_ratio_i *\n (\n split_type_nodes_length\n - len(split_ratio)\n )\n )\n )\n nodes_split_i = (\n split_type_node[\n split_offset:split_offset + num_split_i\n ]\n )\n split_offset += num_split_i\n else:\n nodes_split_i = split_type_node[split_offset:]\n nodes_split_list.append(nodes_split_i)\n\n for idx, nodes_split_i in enumerate(nodes_split_list):\n split_graphs[idx].node_label_index[split_type] = (\n nodes_split_i\n )\n split_graphs[idx].node_label[split_type] = (\n self.node_label[split_type][nodes_split_i]\n )\n else:\n for idx, graph in enumerate(split_graphs):\n graph.node_label_index[split_type] = (\n self.node_label_index[split_type]\n )\n graph.node_label[split_type] = self.node_label[split_type]\n split_graphs[idx] = graph\n\n return split_graphs",
"def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 9,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]",
"def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)",
"def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)",
"def split_data(self):\n (train_data, train_labels) = (self._dataset[0][0][1000:] , self._dataset[0][1][1000:]) # 7982 samples\n (dev_data, dev_labels) = (self._dataset[0][0][:1000] , self._dataset[0][1][:1000]) # 1000 samples\n (test_data, test_labels) = self._dataset[1] # 2246 samples\n\n self._dataset = (train_data, train_labels), (dev_data, dev_labels), (test_data, test_labels)",
"def node_split(self, train_size: Union[int, float] = None, val_size: Union[int, float] = None,\n test_size: Union[int, float] = None, seed_size: Union[int, float] = None,\n train_size_per_class: Union[int, float] = None, val_size_per_class: Union[int, float] = None,\n test_size_per_class: Union[int, float] = None, seed_size_per_class: Union[int, float] = None,\n seed: List[int] = [], data_split: int = 2):\n self = node_class_split(self, train_size=train_size, val_size=val_size,\n test_size=test_size, seed_size=seed_size, train_size_per_class=train_size_per_class,\n val_size_per_class=val_size_per_class, test_size_per_class=test_size_per_class,\n seed_size_per_class=seed_size_per_class, seed=seed, data_split=data_split)",
"def split(self, fraction):\r\n ds = self.dataset\r\n\r\n train_size = int(self.size * fraction)\r\n trainset = DataLoader(ds.take(train_size), train_size)\r\n\r\n test_size = self.size - train_size\r\n testset = DataLoader(ds.skip(test_size), test_size)\r\n\r\n return trainset, testset",
"def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]",
"def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 80,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 2,\n }]",
"def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits",
"def determine_node_split(self,m1):\n\n # list of nodes within that module\n list_nods = list(self.index[m1])\n\n # randomly partition the list of nodes into 2\n nod_split_ind = np.random.randint(1,len(list_nods)) #can't pick the first node as the division\n\n ### CG: but it's ok to put up to the last because\n ## np.random.randint is exclusive on the second number\n n1 = set(list_nods[:nod_split_ind]) #at least 1 large\n n2 = set(list_nods[nod_split_ind:]) #at least 1 large\n\n return n1,n2",
"def split_molecules(dataset, args):\n train_size = int(args.train_p * len(dataset))\n val_size = int(args.val_p * len(dataset))\n\n np.random.shuffle(dataset)\n train = dataset[:train_size]\n val = dataset[train_size:train_size + val_size]\n test = dataset[train_size + val_size:]\n\n print(\"=\" * 50)\n print(\"Num train molecules: %d\" % len(train))\n print(\"Num val molecules: %d\" % len(val))\n print(\"Num test molecules: %d\" % len(test))\n print(\"=\" * 50)\n\n return train, val, test",
"def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)",
"def node_chunks(self):\n if self._node_chunks is None:\n n_chunks = min(self.max_nodes, self.chunks)\n self._node_chunks = np.array_split(np.arange(self.chunks),\n n_chunks)\n return self._node_chunks",
"def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
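The `uniform_split` document above hinges on `np.linspace` producing `nr_agents + 1` equally spaced index boundaries and on a `partition` helper (not shown in the entry) slicing the arrays at those boundaries. A minimal sketch of that idea; the `partition` below is an assumed stand-in, not the entry's actual helper:

```python
import numpy as np

def partition(data, indices, nr_agents):
    # Stand-in for the `partition` helper the entry above assumes: slice
    # `data` at consecutive boundary indices, one contiguous chunk per agent.
    return [data[indices[i]:indices[i + 1]] for i in range(nr_agents)]

samples = np.arange(12).reshape(12, 1)  # 12 toy samples, one feature each
labels = np.arange(12)
nr_agents = 3

# nr_agents + 1 evenly spaced boundaries from 0 to len(samples)
indices = np.linspace(start=0, stop=samples.shape[0], num=nr_agents + 1, dtype=int).tolist()
split_samples = partition(samples, indices, nr_agents)
split_labels = partition(labels, indices, nr_agents)

print([len(chunk) for chunk in split_samples])  # [4, 4, 4] -> equal shares per node
```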
This function computes the distribution's internal parameters from its first two moments. | def _compute_internals(self, moments):
[mean, stdv] = moments
internals = {}
internals['a'] = mean - np.sqrt(3) * stdv
internals['b'] = mean + np.sqrt(3) * stdv
return internals | [
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals",
"def _get_distribution_variables(self, R):\n if self.domain == \"Negative\":\n R_typ = self.param.R_n_typ\n # Particle-size distribution (area-weighted)\n f_a_dist = self.param.f_a_dist_n(R)\n elif self.domain == \"Positive\":\n R_typ = self.param.R_p_typ\n # Particle-size distribution (area-weighted)\n f_a_dist = self.param.f_a_dist_p(R)\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R)\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R)\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R ** 2) / pybamm.Integral(f_a_dist / R ** 2, R)\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.auxiliary_domains[\"secondary\"] == [self.domain.lower() + \" electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(\n f_a_dist_xav, [self.domain.lower() + \" electrode\"]\n )\n f_v_dist = pybamm.SecondaryBroadcast(\n f_v_dist_xav, [self.domain.lower() + \" electrode\"]\n )\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [self.domain.lower() + \" electrode\"]\n )\n\n variables = {\n self.domain + \" particle sizes\": R,\n self.domain + \" particle sizes [m]\": R * R_typ,\n self.domain + \" area-weighted particle-size\"\n + \" distribution\": f_a_dist,\n self.domain + \" area-weighted particle-size\"\n + \" distribution [m-1]\": f_a_dist / R_typ,\n self.domain + \" volume-weighted particle-size\"\n + \" distribution\": f_v_dist,\n self.domain + \" volume-weighted particle-size\"\n + \" distribution [m-1]\": f_v_dist / R_typ,\n self.domain + \" number-based particle-size\"\n + \" distribution\": f_num_dist,\n self.domain + \" number-based particle-size\"\n + \" distribution [m-1]\": f_num_dist / R_typ,\n self.domain + \" area-weighted\"\n + \" mean particle radius\": R_a_mean,\n self.domain + \" area-weighted\"\n + \" mean particle radius [m]\": R_a_mean * R_typ,\n self.domain + \" volume-weighted\"\n + \" mean particle radius\": R_v_mean,\n self.domain + \" volume-weighted\"\n + \" mean particle radius [m]\": R_v_mean * R_typ,\n self.domain + \" number-based\"\n + \" mean particle radius\": R_num_mean,\n self.domain + \" number-based\"\n + \" mean particle radius [m]\": R_num_mean * R_typ,\n self.domain + \" area-weighted particle-size\"\n + \" standard deviation\": sd_a,\n self.domain + \" area-weighted particle-size\"\n + \" standard deviation [m]\": sd_a * R_typ,\n self.domain + \" 
volume-weighted particle-size\"\n + \" standard deviation\": sd_v,\n self.domain + \" volume-weighted particle-size\"\n + \" standard deviation [m]\": sd_v * R_typ,\n self.domain + \" number-based particle-size\"\n + \" standard deviation\": sd_num,\n self.domain + \" number-based particle-size\"\n + \" standard deviation [m]\": sd_num * R_typ,\n # X-averaged distributions\n \"X-averaged \" + self.domain.lower() +\n \" area-weighted particle-size distribution\": f_a_dist_xav,\n \"X-averaged \" + self.domain.lower() +\n \" area-weighted particle-size distribution [m-1]\": f_a_dist_xav / R_typ,\n \"X-averaged \" + self.domain.lower() +\n \" volume-weighted particle-size distribution\": f_v_dist_xav,\n \"X-averaged \" + self.domain.lower() +\n \" volume-weighted particle-size distribution [m-1]\": f_v_dist_xav / R_typ,\n \"X-averaged \" + self.domain.lower() +\n \" number-based particle-size distribution\": f_num_dist_xav,\n \"X-averaged \" + self.domain.lower() +\n \" number-based particle-size distribution [m-1]\": f_num_dist_xav / R_typ,\n }\n\n return variables",
"def moments(self):",
"def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w",
"def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables",
"def get_gamma_distribution_params(mean, std):\n # mean = k * theta\n # var = std**2 = k * theta**2\n theta = std**2 / mean\n k = mean / theta\n return k, theta",
"def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma",
"def _params(self, inputs):\n if inputs.shape[-1] != self.n_inputs:\n raise ValueError(\n 'Invalid distribution parametrization - expected {} parameters, '\n 'got {}. Input shape: {}.'.format(\n self.n_inputs, inputs.shape[-1], inputs.shape\n )\n )\n n_dims = self._n_dims\n # Split the distribution inputs into two parts: mean and std.\n mean = inputs[..., :n_dims]\n if self._learn_std is not None:\n std = inputs[..., n_dims:]\n # Std is non-negative, so let's softplus it.\n std = tl.Softplus()(std + self._std)\n else:\n std = self._std\n # In case of constant or shared std, upsample it to the same dimensionality\n # as the means.\n std = jnp.broadcast_to(std, mean.shape)\n return (mean, std)",
"def moments2e(image):\n assert len(image.shape) == 2 # only for grayscale images\n x, y = mgrid[:image.shape[0],:image.shape[1]]\n moments = {}\n moments['mean_x'] = sum(x*image)/sum(image)\n moments['mean_y'] = sum(y*image)/sum(image)\n\n # raw or spatial moments\n moments['m00'] = sum(image)\n moments['m01'] = sum(x*image)\n moments['m10'] = sum(y*image)\n moments['m11'] = sum(y*x*image)\n moments['m02'] = sum(x**2*image)\n moments['m20'] = sum(y**2*image)\n moments['m12'] = sum(x*y**2*image)\n moments['m21'] = sum(x**2*y*image)\n moments['m03'] = sum(x**3*image)\n moments['m30'] = sum(y**3*image)\n\n # central moments\n # moments['mu01']= sum((y-moments['mean_y'])*image) # should be 0\n # moments['mu10']= sum((x-moments['mean_x'])*image) # should be 0\n moments['mu11'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])*image)\n moments['mu02'] = sum((y-moments['mean_y'])**2*image) # variance\n moments['mu20'] = sum((x-moments['mean_x'])**2*image) # variance\n moments['mu12'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])**2*image)\n moments['mu21'] = sum((x-moments['mean_x'])**2*(y-moments['mean_y'])*image)\n moments['mu03'] = sum((y-moments['mean_y'])**3*image)\n moments['mu30'] = sum((x-moments['mean_x'])**3*image)\n\n # opencv versions\n #moments['mu02'] = sum(image*(x-m01/m00)**2)\n #moments['mu02'] = sum(image*(x-y)**2)\n\n # wiki variations\n #moments['mu02'] = m20 - mean_y*m10\n #moments['mu20'] = m02 - mean_x*m01\n\n # central standardized or normalized or scale invariant moments\n moments['nu11'] = moments['mu11'] / sum(image)**(2/2+1)\n moments['nu12'] = moments['mu12'] / sum(image)**(3/2+1)\n moments['nu21'] = moments['mu21'] / sum(image)**(3/2+1)\n moments['nu20'] = moments['mu20'] / sum(image)**(2/2+1)\n moments['nu03'] = moments['mu03'] / sum(image)**(3/2+1) # skewness\n moments['nu30'] = moments['mu30'] / sum(image)**(3/2+1) # skewness\n return moments",
"def comp_moments(self):\n rr3dr = self.rr**3*np.log(self.rr[1]/self.rr[0])\n rr4dr = self.rr*rr3dr\n sp2mom0,sp2mom1,cs,cd = [],[],np.sqrt(4*np.pi),np.sqrt(4*np.pi/3.0)\n for sp,nmu in enumerate(self.sp2nmult):\n nfunct=sum(2*self.sp_mu2j[sp]+1)\n mom0 = np.zeros((nfunct))\n d = np.zeros((nfunct,3))\n for mu,[j,s] in enumerate(zip(self.sp_mu2j[sp],self.sp_mu2s[sp])):\n if j==0: mom0[s] = cs*sum(self.psi_log[sp][mu,:]*rr3dr)\n if j==1: d[s,1]=d[s+1,2]=d[s+2,0] = cd*sum(self.psi_log[sp][mu,:]*rr4dr)\n sp2mom0.append(mom0)\n sp2mom1.append(d)\n return sp2mom0,sp2mom1",
"def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])",
"def calc_posterior():\n\n a = [alpha, 1-alpha]\n ms = [x_obs, x_obs]\n Ss = map(lambda s: [[s**2]], sigmas)\n\n return pdf.MoG(a=a, ms=ms, Ss=Ss)",
"def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans",
"def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))",
"def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2",
"def PH2From3Moments (moms, prec=1e-14):\n\n m1, m2, m3 = moms\n\n # check moment boounds\n m2l = APH2ndMomentLowerBound(m1, 2) \n m3l = APH3rdMomentLowerBound(m1, m2, 2) \n m3u = APH3rdMomentUpperBound(m1, m2, 2) \n \n if m2<m2l:\n raise Exception(\"The given second moment is not feasible!\") \n if m3<m3l:\n raise Exception(\"The given third moment is not feasible (too small)!\")\n if m3>m3u:\n raise Exception(\"The given third moment is not feasible (too large)!\")\n \n # check if we have an exponential distribution\n if abs(m2/m1/m1-2.0) < prec:\n return (np.matrix([1]), np.matrix([[-1/m1]]))\n \n # calculate parameters\n b = 3.0*m1*m2-m3\n c = 3.0*m2*m2-2.0*m1*m3\n e = -2.0*m1*m1+m2\n a = b*b+6.0*c*e\n if a<0:\n a = 0\n a = math.sqrt(a)\n if c>0:\n lambda1 = (b - a) / c\n lambda2 = (b + a) / c\n p = (-b-6.0*m1*e+a) / (b+a)\n elif c<0:\n lambda1 = (b + a) / c\n lambda2 = (b - a) / c\n p = (b+6.0*m1*e+a) / (-b+a)\n elif c==0:\n lambda1 = 0\n lambda2 = 1.0 / m1\n p = 0\n \n # return the result\n return (np.matrix([p,1.0-p]), np.matrix([[-lambda1, lambda1], [0,-lambda2]]))",
"def calculate_distribution_param(self, state) -> np.ndarray:\n return self._neural_net(state, training=True)",
"def compute_prob_params(self,counts):\n num_transitions = self.num_transitions\n l = self.MC.number_of_nodes()\n totals = dict()\n for from_state in self.MC.nodes():\n tot = sum([counts[(from_state,to_state)] if ((from_state,to_state) in counts.keys()) else 0 for to_state in self.MC.nodes()])\n totals[from_state] = tot\n if tot > 0:\n for to_state in self.MC.nodes():\n # mean and std. dev of the corresponding beta distribution\n p = (counts[(from_state,to_state)]+1)/(tot+l) if ((from_state,to_state) in counts.keys()) else 1/(tot+l)\n self.MC[from_state][to_state]['mu'] = p\n self.MC[from_state][to_state]['sigma'] = np.sqrt(p*(1-p)/(tot+ (l+1)))\n else:\n for to_state in self.MC.nodes():\n p = 1/(tot+l)\n self.MC[from_state][to_state]['mu'] = p\n self.MC[from_state][to_state]['sigma'] = np.sqrt(p*(1-p)/(tot+ (l+1)))\n\n self.params = {(fs,ts):self.MC[fs][ts]['mu'] for (fs,ts) in self.MC.edges()}\n self.std = {(fs,ts):self.MC[fs][ts]['sigma'] for (fs,ts) in self.MC.edges()}\n self.state_totals = totals",
"def _find_ab_params(self):\n def curve(x, a, b):\n return 1.0 / (1.0 + a * x ** (2 * b))\n\n xv = np.linspace(0, self.spread * 3, 300)\n yv = np.zeros(xv.shape)\n yv[xv < self.min_dist] = 1.0\n yv[xv >= self.min_dist] = np.exp(\n -(xv[xv >= self.min_dist] - self.min_dist) / self.spread)\n params, covar = curve_fit(curve, xv, yv)\n self.a = params[0]\n self.b = params[1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
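The `a`/`b` formulas in the document above follow from the moments of a uniform distribution on [a, b]: mean = (a + b) / 2 and stdv = (b - a) / (2*sqrt(3)), which invert to a = mean - sqrt(3)*stdv and b = mean + sqrt(3)*stdv. A quick numerical check with assumed example moments:

```python
import numpy as np

mean, stdv = 10.0, 2.0          # hypothetical first two moments
a = mean - np.sqrt(3) * stdv    # lower bound of the uniform support
b = mean + np.sqrt(3) * stdv    # upper bound of the uniform support

# The analytic moments of U(a, b) recover the inputs exactly
assert np.isclose((a + b) / 2, mean)
assert np.isclose((b - a) / (2 * np.sqrt(3)), stdv)
print(a, b)  # ~6.54, ~13.46
```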
This function computes the distribution's internal parameters from its first two moments. | def _compute_internals(self, moments):
[mean, stdv] = moments
internals = {}
internals['mu'] = mean
internals['sigma'] = stdv
return internals | [
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals",
"def _get_distribution_variables(self, R):\n if self.domain == \"Negative\":\n R_typ = self.param.R_n_typ\n # Particle-size distribution (area-weighted)\n f_a_dist = self.param.f_a_dist_n(R)\n elif self.domain == \"Positive\":\n R_typ = self.param.R_p_typ\n # Particle-size distribution (area-weighted)\n f_a_dist = self.param.f_a_dist_p(R)\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R)\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R)\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R ** 2) / pybamm.Integral(f_a_dist / R ** 2, R)\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.auxiliary_domains[\"secondary\"] == [self.domain.lower() + \" electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(\n f_a_dist_xav, [self.domain.lower() + \" electrode\"]\n )\n f_v_dist = pybamm.SecondaryBroadcast(\n f_v_dist_xav, [self.domain.lower() + \" electrode\"]\n )\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [self.domain.lower() + \" electrode\"]\n )\n\n variables = {\n self.domain + \" particle sizes\": R,\n self.domain + \" particle sizes [m]\": R * R_typ,\n self.domain + \" area-weighted particle-size\"\n + \" distribution\": f_a_dist,\n self.domain + \" area-weighted particle-size\"\n + \" distribution [m-1]\": f_a_dist / R_typ,\n self.domain + \" volume-weighted particle-size\"\n + \" distribution\": f_v_dist,\n self.domain + \" volume-weighted particle-size\"\n + \" distribution [m-1]\": f_v_dist / R_typ,\n self.domain + \" number-based particle-size\"\n + \" distribution\": f_num_dist,\n self.domain + \" number-based particle-size\"\n + \" distribution [m-1]\": f_num_dist / R_typ,\n self.domain + \" area-weighted\"\n + \" mean particle radius\": R_a_mean,\n self.domain + \" area-weighted\"\n + \" mean particle radius [m]\": R_a_mean * R_typ,\n self.domain + \" volume-weighted\"\n + \" mean particle radius\": R_v_mean,\n self.domain + \" volume-weighted\"\n + \" mean particle radius [m]\": R_v_mean * R_typ,\n self.domain + \" number-based\"\n + \" mean particle radius\": R_num_mean,\n self.domain + \" number-based\"\n + \" mean particle radius [m]\": R_num_mean * R_typ,\n self.domain + \" area-weighted particle-size\"\n + \" standard deviation\": sd_a,\n self.domain + \" area-weighted particle-size\"\n + \" standard deviation [m]\": sd_a * R_typ,\n self.domain + \" 
volume-weighted particle-size\"\n + \" standard deviation\": sd_v,\n self.domain + \" volume-weighted particle-size\"\n + \" standard deviation [m]\": sd_v * R_typ,\n self.domain + \" number-based particle-size\"\n + \" standard deviation\": sd_num,\n self.domain + \" number-based particle-size\"\n + \" standard deviation [m]\": sd_num * R_typ,\n # X-averaged distributions\n \"X-averaged \" + self.domain.lower() +\n \" area-weighted particle-size distribution\": f_a_dist_xav,\n \"X-averaged \" + self.domain.lower() +\n \" area-weighted particle-size distribution [m-1]\": f_a_dist_xav / R_typ,\n \"X-averaged \" + self.domain.lower() +\n \" volume-weighted particle-size distribution\": f_v_dist_xav,\n \"X-averaged \" + self.domain.lower() +\n \" volume-weighted particle-size distribution [m-1]\": f_v_dist_xav / R_typ,\n \"X-averaged \" + self.domain.lower() +\n \" number-based particle-size distribution\": f_num_dist_xav,\n \"X-averaged \" + self.domain.lower() +\n \" number-based particle-size distribution [m-1]\": f_num_dist_xav / R_typ,\n }\n\n return variables",
"def moments(self):",
"def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w",
"def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables",
"def get_gamma_distribution_params(mean, std):\n # mean = k * theta\n # var = std**2 = k * theta**2\n theta = std**2 / mean\n k = mean / theta\n return k, theta",
"def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma",
"def _params(self, inputs):\n if inputs.shape[-1] != self.n_inputs:\n raise ValueError(\n 'Invalid distribution parametrization - expected {} parameters, '\n 'got {}. Input shape: {}.'.format(\n self.n_inputs, inputs.shape[-1], inputs.shape\n )\n )\n n_dims = self._n_dims\n # Split the distribution inputs into two parts: mean and std.\n mean = inputs[..., :n_dims]\n if self._learn_std is not None:\n std = inputs[..., n_dims:]\n # Std is non-negative, so let's softplus it.\n std = tl.Softplus()(std + self._std)\n else:\n std = self._std\n # In case of constant or shared std, upsample it to the same dimensionality\n # as the means.\n std = jnp.broadcast_to(std, mean.shape)\n return (mean, std)",
"def moments2e(image):\n assert len(image.shape) == 2 # only for grayscale images\n x, y = mgrid[:image.shape[0],:image.shape[1]]\n moments = {}\n moments['mean_x'] = sum(x*image)/sum(image)\n moments['mean_y'] = sum(y*image)/sum(image)\n\n # raw or spatial moments\n moments['m00'] = sum(image)\n moments['m01'] = sum(x*image)\n moments['m10'] = sum(y*image)\n moments['m11'] = sum(y*x*image)\n moments['m02'] = sum(x**2*image)\n moments['m20'] = sum(y**2*image)\n moments['m12'] = sum(x*y**2*image)\n moments['m21'] = sum(x**2*y*image)\n moments['m03'] = sum(x**3*image)\n moments['m30'] = sum(y**3*image)\n\n # central moments\n # moments['mu01']= sum((y-moments['mean_y'])*image) # should be 0\n # moments['mu10']= sum((x-moments['mean_x'])*image) # should be 0\n moments['mu11'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])*image)\n moments['mu02'] = sum((y-moments['mean_y'])**2*image) # variance\n moments['mu20'] = sum((x-moments['mean_x'])**2*image) # variance\n moments['mu12'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])**2*image)\n moments['mu21'] = sum((x-moments['mean_x'])**2*(y-moments['mean_y'])*image)\n moments['mu03'] = sum((y-moments['mean_y'])**3*image)\n moments['mu30'] = sum((x-moments['mean_x'])**3*image)\n\n # opencv versions\n #moments['mu02'] = sum(image*(x-m01/m00)**2)\n #moments['mu02'] = sum(image*(x-y)**2)\n\n # wiki variations\n #moments['mu02'] = m20 - mean_y*m10\n #moments['mu20'] = m02 - mean_x*m01\n\n # central standardized or normalized or scale invariant moments\n moments['nu11'] = moments['mu11'] / sum(image)**(2/2+1)\n moments['nu12'] = moments['mu12'] / sum(image)**(3/2+1)\n moments['nu21'] = moments['mu21'] / sum(image)**(3/2+1)\n moments['nu20'] = moments['mu20'] / sum(image)**(2/2+1)\n moments['nu03'] = moments['mu03'] / sum(image)**(3/2+1) # skewness\n moments['nu30'] = moments['mu30'] / sum(image)**(3/2+1) # skewness\n return moments",
"def comp_moments(self):\n rr3dr = self.rr**3*np.log(self.rr[1]/self.rr[0])\n rr4dr = self.rr*rr3dr\n sp2mom0,sp2mom1,cs,cd = [],[],np.sqrt(4*np.pi),np.sqrt(4*np.pi/3.0)\n for sp,nmu in enumerate(self.sp2nmult):\n nfunct=sum(2*self.sp_mu2j[sp]+1)\n mom0 = np.zeros((nfunct))\n d = np.zeros((nfunct,3))\n for mu,[j,s] in enumerate(zip(self.sp_mu2j[sp],self.sp_mu2s[sp])):\n if j==0: mom0[s] = cs*sum(self.psi_log[sp][mu,:]*rr3dr)\n if j==1: d[s,1]=d[s+1,2]=d[s+2,0] = cd*sum(self.psi_log[sp][mu,:]*rr4dr)\n sp2mom0.append(mom0)\n sp2mom1.append(d)\n return sp2mom0,sp2mom1",
"def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])",
"def calc_posterior():\n\n a = [alpha, 1-alpha]\n ms = [x_obs, x_obs]\n Ss = map(lambda s: [[s**2]], sigmas)\n\n return pdf.MoG(a=a, ms=ms, Ss=Ss)",
"def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans",
"def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))",
"def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2",
"def PH2From3Moments (moms, prec=1e-14):\n\n m1, m2, m3 = moms\n\n # check moment boounds\n m2l = APH2ndMomentLowerBound(m1, 2) \n m3l = APH3rdMomentLowerBound(m1, m2, 2) \n m3u = APH3rdMomentUpperBound(m1, m2, 2) \n \n if m2<m2l:\n raise Exception(\"The given second moment is not feasible!\") \n if m3<m3l:\n raise Exception(\"The given third moment is not feasible (too small)!\")\n if m3>m3u:\n raise Exception(\"The given third moment is not feasible (too large)!\")\n \n # check if we have an exponential distribution\n if abs(m2/m1/m1-2.0) < prec:\n return (np.matrix([1]), np.matrix([[-1/m1]]))\n \n # calculate parameters\n b = 3.0*m1*m2-m3\n c = 3.0*m2*m2-2.0*m1*m3\n e = -2.0*m1*m1+m2\n a = b*b+6.0*c*e\n if a<0:\n a = 0\n a = math.sqrt(a)\n if c>0:\n lambda1 = (b - a) / c\n lambda2 = (b + a) / c\n p = (-b-6.0*m1*e+a) / (b+a)\n elif c<0:\n lambda1 = (b + a) / c\n lambda2 = (b - a) / c\n p = (b+6.0*m1*e+a) / (-b+a)\n elif c==0:\n lambda1 = 0\n lambda2 = 1.0 / m1\n p = 0\n \n # return the result\n return (np.matrix([p,1.0-p]), np.matrix([[-lambda1, lambda1], [0,-lambda2]]))",
"def calculate_distribution_param(self, state) -> np.ndarray:\n return self._neural_net(state, training=True)",
"def compute_prob_params(self,counts):\n num_transitions = self.num_transitions\n l = self.MC.number_of_nodes()\n totals = dict()\n for from_state in self.MC.nodes():\n tot = sum([counts[(from_state,to_state)] if ((from_state,to_state) in counts.keys()) else 0 for to_state in self.MC.nodes()])\n totals[from_state] = tot\n if tot > 0:\n for to_state in self.MC.nodes():\n # mean and std. dev of the corresponding beta distribution\n p = (counts[(from_state,to_state)]+1)/(tot+l) if ((from_state,to_state) in counts.keys()) else 1/(tot+l)\n self.MC[from_state][to_state]['mu'] = p\n self.MC[from_state][to_state]['sigma'] = np.sqrt(p*(1-p)/(tot+ (l+1)))\n else:\n for to_state in self.MC.nodes():\n p = 1/(tot+l)\n self.MC[from_state][to_state]['mu'] = p\n self.MC[from_state][to_state]['sigma'] = np.sqrt(p*(1-p)/(tot+ (l+1)))\n\n self.params = {(fs,ts):self.MC[fs][ts]['mu'] for (fs,ts) in self.MC.edges()}\n self.std = {(fs,ts):self.MC[fs][ts]['sigma'] for (fs,ts) in self.MC.edges()}\n self.state_totals = totals",
"def _find_ab_params(self):\n def curve(x, a, b):\n return 1.0 / (1.0 + a * x ** (2 * b))\n\n xv = np.linspace(0, self.spread * 3, 300)\n yv = np.zeros(xv.shape)\n yv[xv < self.min_dist] = 1.0\n yv[xv >= self.min_dist] = np.exp(\n -(xv[xv >= self.min_dist] - self.min_dist) / self.spread)\n params, covar = curve_fit(curve, xv, yv)\n self.a = params[0]\n self.b = params[1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
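For the normal-distribution variant the mapping is direct: the internal parameters are the first two moments themselves (mu = mean, sigma = stdv). A small sanity check with assumed example values:

```python
import numpy as np

mean, stdv = 10.0, 2.0                   # hypothetical first two moments
internals = {'mu': mean, 'sigma': stdv}  # same mapping as the document above

# Draw a large sample with those internals and recover the moments
rng = np.random.default_rng(0)
sample = rng.normal(internals['mu'], internals['sigma'], size=1_000_000)
print(round(sample.mean(), 2), round(sample.std(), 2))  # ~10.0, ~2.0
```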
Provides a Step Functions Activity data source. Example Usage: ```python import pulumi import pulumi_aws as aws sfn_activity = aws.sfn.get_activity(name="myactivity") ``` | def get_activity_output(arn: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActivityResult]:
... | [
"def activity2run(user, activity):\n run = Run()\n run.runner = user\n run.strava_id = activity.id\n run.name = activity.name\n run.distance = activity.distance\n run.elapsed_time = activity.elapsed_time.total_seconds()\n run.average_speed = activity.average_speed\n run.average_heartrate = activity.average_heartrate\n run.total_elevation_gain = activity.total_elevation_gain\n run.start_date = activity.start_date\n return run",
"def get_activities():\n return [\n activity\n for activity in get_source_labels_and_configs()\n if activity[1].name.startswith(\"activities.\")\n ]",
"def construct_strava_activity_data(activity):\n # if the timestamp has been saved then use this over converting the other one\n # issues with server tz so better to use the timestamp at the point the activity record was created\n if activity.iso_timestamp:\n local_time = activity.iso_timestamp\n else:\n local_time = activity.local_timestamp.isoformat()\n\n data = {'name': activity.title,\n 'type': STRAVA_ACTIVITIES_LOOKUP[activity.type],\n 'start_date_local': local_time,\n 'elapsed_time': activity.duration * 60, # need to convert to seconds, stored in db as minutes\n 'description': activity.description}\n\n if activity.distance is not None and activity.distance > 0:\n data['distance'] = activity.distance * 1000 # Strava API requires distance in m, stored in db as km\n\n return data",
"def get_activity(variable):\n project = variable['project']\n try:\n exp = variable['exp']\n if isinstance(exp, list):\n return [CMOR_TABLES[project].activities[value][0] for value in exp]\n return CMOR_TABLES[project].activities[exp][0]\n except (KeyError, AttributeError):\n return None",
"def _load_activity_data(self):\n activity_timeline_list = []\n cuda_compute_ops_timeline_list = []\n args_dict = {}\n activity_file_path = self._get_and_validate_path(\n self._output_activity_execute_time_file_path)\n activity_args_file_path = self._get_and_validate_path(\n self._output_gpu_activity_info_file_path)\n\n if not os.path.exists(activity_args_file_path):\n logger.error(f'The file {activity_args_file_path} does not exist.')\n raise ProfilerFileNotFoundException(activity_args_file_path)\n with open(activity_args_file_path, 'r') as args_file:\n csv_reader = csv.reader(args_file)\n keys_list = next(csv_reader)\n # keys_list format is: name, type, op_full_name, stream_id, block_dim, grid_dim, ...\n self._activity_keys_list = keys_list[1:3] + keys_list[4:6]\n for info in csv_reader:\n args_dict[info[0]] = info[1:3] + info[4:6]\n\n if not os.path.exists(activity_file_path):\n logger.error(f'The file {activity_file_path} does not exist.')\n raise ProfilerFileNotFoundException(activity_file_path)\n with open(activity_file_path, 'r') as f_obj:\n for line in f_obj:\n line_list = line.strip('\\n').split(';')\n # concat activity args info.\n line_list += args_dict.get(line_list[0])\n if not line_list[0].startswith('nccl'):\n cuda_compute_ops_timeline_list.append(line_list)\n activity_timeline_list.append(line_list)\n\n return activity_timeline_list, cuda_compute_ops_timeline_list",
"def activity(*, domain, name, version):\n def function_wrapper(func):\n identifier = '{}:{}:{}'.format(name, version, domain)\n ACTIVITY_FUNCTIONS[identifier] = func\n\n return function_wrapper",
"def get_activity_object(activity_name, settings, logger, conn, token, activity_task):\n full_path = \"activity.\" + activity_name + \".\" + activity_name\n f = eval(full_path)\n # Create the object\n activity_object = f(settings, logger, conn, token, activity_task)\n return activity_object",
"def weather_activity(req):\n\n # validate request parameters, return an error if there are issues\n error, forecast_params = validate_params(req['parameters'])\n if error:\n return error\n\n # Check to make sure there is a activity, if not return an error\n if not forecast_params['activity']:\n return 'What activity were you thinking of doing?'\n\n # create a forecast object which retrieves the forecast from a external API\n try:\n forecast = Forecast(forecast_params)\n # return an error if there is an error getting the forecast\n except (ValueError, IOError) as error:\n return error\n\n # get the response\n return forecast.get_activity_response()",
"def activity(self, activity_id):\r\n return resources.Activity(self, activity_id)",
"def get_activity_data(self, env_name, beginning, end):\n activity = None\n try:\n activity = ProjectActivity(self.env, env_name)\n except ResourceNotFound, e:\n self.log.debug(e)\n activity = ProjectActivity(self.env)\n if not activity.is_expired():\n return activity\n env = None\n parent_dir = get_trac_env_parent_dir(self.env)\n env_dir = os.path.join(parent_dir, env_name)\n try:\n env = open_environment(env_dir, True)\n except Exception, e:\n # if needs_upgrade is True, raise Exception.\n self.log.warn(\"Cannot load a project Environment '%s', \"\n \"Error: %s\" % (env_dir, e))\n return None\n (activity.changes, activity.changes_month) = self._get_rev_count(env, beginning, end)\n (activity.tickets, activity.tickets_closed, activity.tickets_month,\n activity.tickets_closed_month) = self._get_ticket_count(env, beginning, end)\n if activity.exists:\n activity.update()\n else:\n activity.env_name = env_name\n activity.insert()\n return activity",
"def get_activity_stream(self, before=None):\n # if before:\n # url = \"https://www.duolingo.com/stream/{}?before={}\"\n # url = url.format(self.user_data.id, before)\n # else:\n # url = \"https://www.duolingo.com/activity/{}\"\n # url = url.format(self.user_data.id)\n # request = self._make_req(url)\n # try:\n # return request.json()\n # except:\n # raise Exception('Could not get activity stream')\n pass",
"def test_get_activity(self):\n pass",
"def get_activities_goals(self):\n r = self.fitbit_service.get('http://api.fitbit.com/1/user/-/activities/goals/daily.json', header_auth=True)\n logging.debug('Getting Daily Activity Goals. %s %s', r.status_code, r.text)\n if r.status_code != 200:\n return None\n return r.json()",
"def _QueryActivities():\r\n tasks = []\r\n for vp_dict in request['viewpoints']:\r\n if vp_dict.get('get_activities', False):\r\n tasks.append(gen.Task(Viewpoint.QueryActivities, client, vp_dict['viewpoint_id'],\r\n excl_start_key=vp_dict.get('activity_start_key', None),\r\n limit=limit))\r\n else:\r\n tasks.append(util.GenConstant(None))\r\n\r\n activity_results = yield tasks\r\n raise gen.Return(activity_results)",
"def activities(self) -> Sequence[Any]:\n return pulumi.get(self, \"activities\")",
"def get_activities_info(self, date):\n r = self.fitbit_service.get('http://api.fitbit.com/1/user/-/activities/date/%s.json' % date, header_auth=True)\n logging.debug('Getting activities. %s %s', r.status_code, r.text)\n if r.status_code != 200:\n return None\n return r.json()",
"def get_activity():\n try:\n activity = Activity.objects.filter(active=1).latest('id')\n except Activity.DoesNotExist:\n activity = None\n return activity",
"def get_next_activity(self):\n return # osid.learning.Activity",
"def extract_activity(file_addr, activity_name):\n\n activity_num = activity_to_num[activity_name]\n activity = Activity(activity_num)\n\n with open(file_addr, 'r') as file:\n\n next(file, None) # skip the headers\n for line in file:\n row = line.split(' ')\n if activity_name == 'brush_teeth':\n row = line.split(',')\n row[-1] = row[-1][:len(row[-1]) - 1] # deleting \\n from end of line\n\n activity.append_acc_data(float(row[0]), float(row[1]), float(row[2]))\n\n return activity"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the cvxpy variable associated with this layer | def get_cvxpy_variable(self, channel_indx=None):
if channel_indx is None:
output_channels = cp.hstack(
[
self.layer_input[cur_channel_indx]
for cur_channel_indx in range(self.n_in_channels)
]
)
else:
output_channels = self.layer_input[channel_indx]
return output_channels | [
"def xvar ( self ) :\n return self.__xvar",
"def _get_variable(self, varname):\n\n return NetcdfVariableNetcdf4(self._file.variables[varname])",
"def x ( self ) :\n return self.xvar",
"def var(self, name):\n return self.get_ground_vector('Var:{}-Var'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def getVariable(self, gradientCoordinate):\n return self.variables[gradientCoordinate]",
"def get_nc_variables_x_by_attributes(self, axis='X'):\n variables_list = self.dataset.get_variables_by_attributes(axis=axis)\n return variables_list[0] if len(variables_list) > 0 else None",
"def getVariable(self):\n return _libsbml.Rule_getVariable(self)",
"def getVariable(self):\n return self.variable",
"def var(self,i): # TODO: change to property to access (read only?) X?\n return Var(i,self.dims[i])",
"def get_var(self, v):\n\n if not self.is_initialized:\n msg.fail(\"ERROR: problem has not been initialized\")\n\n return self.sim.cc_data.get_var(v)",
"def intrinsic_variable(self):\n if IVARG_ROLE in self.args:\n return self.args[IVARG_ROLE]\n return None",
"def to_var(self, x):\n\t\tif torch.cuda.is_available():\n\t\t\tx = x.cuda()\n\t\treturn Variable(x) #torch.autograd.Variable",
"def get_variable(self, name):\n return self.variables[name]",
"def yvar ( self ) :\n return self.__yvar",
"def get_vector_variables(self):\n\n #Se obtiene el atributo relativo a las variables de decisión.\n return self.__vector_variables",
"def get_nc_variables_z_by_attributes(self, axis='Z'):\n variables_list = self.dataset.get_variables_by_attributes(axis=axis)\n\n return variables_list[0] if len(variables_list) > 0 else None",
"def _get_embedding_variable(self, layer_name):\n return self._tls._embed_variables.get(layer_name, None)",
"def getVariable(self):\n return _libsbml.EventAssignment_getVariable(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns number of output channels | def get_n_channels(self):
return self.n_out_channels | [
"def nb_channels(self):",
"def get_num_channels():\r\n check_mixer()\r\n return sdl.Mix_GroupCount(-1)",
"def num_of_channels(self) -> int:\n return len(self.non_zero_channels())",
"def num_layers(self):\n return len(self.out_channels)",
"def GetNumChannels(self):\n # Return the number of channels\n return self.numChannels",
"def num_channels(self):\n return len(self.__data)",
"def getOutputCount(self):\n return _yarp.BufferedPortImageMono_getOutputCount(self)",
"def num_outputs(self):\n return self.out_dim",
"def get_num_channels(x):\n return x.get_shape().as_list()[-1]",
"def getNumOfInputChannels(self):\n numChannels = ct.c_int()\n tryfunc(th260lib.TH260_GetNumOfInputChannels(\n self._device_number, byref(numChannels)))\n return numChannels.value",
"def getOutputCount(self):\n return _yarp.BufferedPortSound_getOutputCount(self)",
"def getOutputCount(self):\n return _yarp.BufferedPortImageRgb_getOutputCount(self)",
"def channelCount(self) -> int:\n return self.coreImage.getChannelCount()",
"def get_num_channels(self):\n return _uhd_swig.rx_streamer_get_num_channels(self)",
"def input_channels(self) -> int:\n return self._observation_space.shape[0]",
"def channels(self) -> int:\n try:\n return self.image.shape[2]\n except IndexError:\n return 1",
"def getOutputCount(self):\n return _yarp.BufferedPortImageRgbFloat_getOutputCount(self)",
"def out_channels(self):\r\n return [self._width] * (self._depth + 1)",
"def n_channels(self):\n n_channels = RPR.GetMediaSourceNumChannels(self.id)\n return n_channels"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a BiRealNet18 model. | def birealnet18(pretrained=False, **kwargs):
model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)
return model | [
"def resnet18():\n model = ResNet18(BasicBlock, [2, 2, 2, 2])\n #if pretrained:\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def birealnet34(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n None\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model",
"def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def rl_modelrl_l1_tiny():\n hparams = rl_modelrl_tiny()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams",
"def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model",
"def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)",
"def rl_modelrl_l2_tiny():\n hparams = rl_modelrl_tiny()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams",
"def __init__(self,\r\n\t\t\t\t model_file_lang1, model_file_lang2,\r\n\t\t\t\t lex_file_lang1, lex_file_lang2,\r\n\t\t\t\t lex_weight=1):\r\n\t\tself.lex_weight = lex_weight\r\n\t\tself.model = {}\r\n\t\tself.model[self.LANG1] = ViterbiLanguageModel.load(model_file_lang1, lex_file_lang1, lex_weight)\r\n\t\tself.model[self.LANG2] = ViterbiLanguageModel.load(model_file_lang2, lex_file_lang2, lex_weight)",
"def __init__(self):\n\t\tsuper(BigramModel, self).__init__()",
"def newModel(self, model_name):\n model = super().newModel(model_name)\n model.Params.Method = self.getint(CC.L2_GRB_METHOD, section=CC.GUROBI, default=-1)\n model.Params.Presolve = self.getint(CC.L2_GRB_PRESOLVE, section=CC.GUROBI, default=-1)\n model.Params.PreSparsify = self.getint(CC.L2_GRB_PRESPARSIFY, section=CC.GUROBI, default=-1)\n return model",
"def build_model():",
"def __init__(self, model_name, base_path, models_path,\n weights_path, shapes, dyns, outdirs,\n nets_name, input_names, framework, target):\n super(MXNetGraphConverter, self).__init__(framework, base_path)\n print(\"{} bmodel converter init\".format(model_name))\n self.model_name = model_name\n self.models_path = models_path\n self.weights_path = weights_path\n self.shapes = shapes\n self.dyns = dyns\n self.outdirs = outdirs\n self.nets_name = nets_name\n self.input_names = input_names\n self.target = target\n assert len(self.models_path) == len(self.weights_path)\n assert len(self.models_path) == len(self.nets_name)\n assert len(self.models_path) == len(self.dyns)\n assert len(self.models_path) == len(self.outdirs)\n assert len(self.models_path) == len(self.input_names)\n self.output_base_path = os.path.join(self.base_path,\n self.model_name + \"_ir\")\n if not os.path.exists(self.output_base_path):\n os.mkdir(self.output_base_path)",
"def __init__(self):\n\n # Load the model\n # 1. set the appropriate parameters\n self.eval_batch_size = 16\n self.max_seq_length = 64\n self.do_lower_case = True\n\n # 2. Initialize the PyTorch model\n model_state_dict = torch.load(DEFAULT_MODEL_PATH+'pytorch_model.bin', map_location='cpu')\n self.tokenizer = BertTokenizer.from_pretrained(DEFAULT_MODEL_PATH, do_lower_case=self.do_lower_case)\n self.model = BertForMultiLabelSequenceClassification.from_pretrained(DEFAULT_MODEL_PATH,\n num_labels=len(LABEL_LIST),\n state_dict=model_state_dict)\n self.device = torch.device(\"cpu\")\n self.model.to(self.device)\n\n # 3. Set the layers to evaluation mode\n self.model.eval()",
"def resnet18():\n return ResNet(Basic_Block, [2, 2, 2, 2])",
"def __init__(self, weights, nb_classes = 39):\n\n self.mdl = model_keras.model_vt(nb_classes=nb_classes, dataset_name=\"modelnet\")\n\n self.mdl.load_weights(weights)",
"def __init__(self, model_name, base_path, models_path,\n shapes, dyns, outdirs, nets_name,\n input_names, output_names, framework, target):\n super(TfGraphConverter, self).__init__(framework, base_path)\n print(\"{} bmodel converter init\".format(model_name))\n self.model_name = model_name\n self.models_path = models_path\n self.shapes = shapes\n self.dyns = dyns\n self.outdirs = outdirs\n self.nets_name = nets_name\n self.input_names = input_names\n self.output_names = output_names\n self.target = target\n assert len(self.models_path) == len(self.shapes)\n assert len(self.models_path) == len(self.nets_name)\n assert len(self.models_path) == len(self.dyns)\n assert len(self.models_path) == len(self.outdirs)\n self.output_base_path = os.path.join(self.base_path,\n self.model_name + \"_ir\")\n if not os.path.exists(self.output_base_path):\n os.mkdir(self.output_base_path)",
"def create_model(self):\r\n max_features_ngram = self.max_features_ngram + 1 # input dims # input dims\r\n inputs = Input(shape=(self.maxlen,))\r\n embed = Embedding(max_features_ngram, self.embedding_length)(inputs)\r\n gru = GRU(256, dropout=0.25, recurrent_dropout=0.25, return_sequences=True)(embed)\r\n output = Attention_layer()(gru)\r\n dense1 = Dense(256, activation='relu')(output)\r\n bn = BatchNormalization()(dense1)\r\n dense2 = Dense(self.nb_classes, activation='softmax')(bn)\r\n model = Model(inputs=inputs, outputs=dense2)\r\n return model"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a BiRealNet34 model. | def birealnet34(pretrained=False, **kwargs):
model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)
return model | [
"def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model",
"def resnet18():\n model = ResNet18(BasicBlock, [2, 2, 2, 2])\n #if pretrained:\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def initialize_model():\n model = models.squeezenet1_0(pretrained=False)\n model.features[0] = nn.Conv2d(1, 96, kernel_size=7, stride=2)\n model.classifier[1] = nn.Conv2d(512, 25, kernel_size=(1, 1), stride=(1, 1))\n\n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNetlist(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model",
"def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)",
"def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)",
"def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self",
"def vt_resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VTResNet:\n return create_model('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)",
"def build_model():\n model = Sequential()\n model.add(Dense(beer_emb.EMB_DIM, activation=\"relu\",\n input_dim=beer_emb.EMB_DIM))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'], optimizer='adam')\n\n return model",
"def bl_resnet50(pretrained=False, **kwargs):\n model = bL_ResNet([2, 3, 5, 3], **kwargs)\n # print ('model created')\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def build_lenet_model():\n model = build_preprocess_layers()\n model.add(Convolution2D(6, 5, 5, activation=\"relu\"))\n model.add(MaxPooling2D())\n model.add(Convolution2D(6, 5, 5, activation=\"relu\"))\n model.add(MaxPooling2D())\n model.add(Flatten())\n model.add(Dense(120))\n model.add(Dense(84))\n model.add(Dense(1))\n\n return model",
"def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model",
"def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model",
"def build_backbone(config):\n assert config.MODEL.BACKBONE in ['resnet50', 'resnet101'], \"backbone name is not supported!\"\n backbone_name = config.MODEL.BACKBONE\n dilation = False\n train_backbone = not config.EVAL\n return_interm_layers = False #TODO: impl case True for segmentation\n\n position_embedding = build_position_encoding(config.MODEL.TRANS.HIDDEN_SIZE)\n backbone = Backbone(backbone_name, train_backbone, return_interm_layers, dilation)\n model = Joiner(backbone, position_embedding)\n model.num_channels = backbone.num_channels\n\n return model",
"def build_model():",
"def __init__(self, num_models: int, num_classes: int):\n self.nun_models = num_models\n self.num_classes = num_classes\n self.model: keras.Model = self.init_model()",
"def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clears the peak_to_peer info which can get quite large. | async def clear_sync_info(self) -> None:
    self.peak_to_peer = OrderedDict() | [
"def reset():\n Peaks.offsets = set()\n Peaks.offsetdone = False",
"def clear(self):\n self._fingerprint = 0",
"def deleteRemainingPeaks(self):\n \n for existingPeak in self.existingPeaks:\n applData = existingPeak.findFirstApplicationData(application = self.format, keyword = peakNum_kw)\n if applData:\n addString = \" (number %d)\" % applData.value\n else:\n addString = \"\"\n print \" Warning: Deleting original peak%s\" % addString\n \n existingPeak.delete()",
"def reset(self):\n self.put('NPTS', 0)\n for i in range(1, NUM_TRIGGERS+1):\n self.clear_trigger(i)\n for i in range(1, NUM_POSITIONERS+1):\n self.clear_positioner(i)\n for i in range(1, NUM_DETECTORS+1):\n self.clear_detector(i)\n poll(1.e-3, 1.0)",
"def reset(self):\n self.subnets = dict()\n for p in self.source_hash.points:\n p.forward_cands = []\n p.subnet = None\n for i, p in enumerate(self.dest_hash.points):\n # p.back_cands = []\n p.subnet = i\n self.subnets[i] = set(), {p}",
"def clear(self):\n self._members = []\n self._size = 0\n self._updated = True\n self._BFS_collect = None\n self._center = None",
"def clean_connected_peers(self):\n\n if len(self.connected_peers) > 1: \n i = 0\n for addr in self.connected_peers:\n j = 0\n for other in self.connected_peers[i+1:]:\n if addr[0] == other[0] and int(addr[1]) == int(other[1]):\n self.connected_peers.pop(j)\n j -= 1\n j += 1\n i += 1",
"def clear(self):\n self.__attendees = []\n self._track_changes()",
"def clear(self):\n self._latencies = [0] * len(BUCKETS)",
"def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()",
"def clear_receiver(receiver):\n database.update_receiver_media(receiver, \"\", \"\")\n\n database.update_receiver_status(receiver, \"\")",
"def reset(self):\n self._monitor.notify_received()\n self._pinger.stop()\n self._mark_fresh()",
"def clear_info(self):\n self._start_info = {}\n self._end_info = {}",
"def rm_calibration(self):\n\n self.bin_edges_kev = None",
"def invalidate_min_max(self):\n self.max_amplitude = None\n self.min_amplitude = None\n self.max_wavenumber = None\n self.min_wavenumber = None",
"def clearResonancePeakDimContribs(resonance,peaks=None):\n\n if not peaks:\n peaks = []\n\n peakDict = {}\n for peak in peaks:\n peakDict[peak] = True\n \n peakDims = {} \n for contrib in resonance.peakDimContribs:\n peakDim = contrib.peakDim\n \n if (not peakDict) or peakDict.get(peakDim.peak):\n peakDims[peakDim] = True\n peakContribs = contrib.peakContribs\n contrib.delete()\n \n for peakContrib in peakContribs:\n if not peakContrib.peakDimContribs:\n peakContrib.delete()",
"def _cancel_peak_checker(self) -> None:\n if self._peak_checker_unsub is not None:\n self._peak_checker_unsub()\n self._peak_checker_unsub = None",
"def clear(self):\n\n self.__fasteners.clear()\n self.__update()",
"def clearPulse(self):\n self.pulses = dict() # old mode for compatibility reasons\n self._params[\"pulses\"] = dict() # old mode\n self.totalPulse[:] = 0 # old mode\n self.sendPulse() # old mode\n\n self.clearMarkersList() # new mode\n self.pulseList = []\n self.preparePulseSequence()\n self.prepareMarkerSequence()\n self.sendPulseSequence()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a hex string from the venue names to use as a unique id. Only the last 8 characters are used for the unique id. | def make_unique_id(venue_list):
md5_hash = md5()
for name in venue_list:
md5_hash.update(name)
hash_hex = md5_hash.hexdigest()
return hash_hex[-8:] | [
"def create_id(name):\n if _IDENTIFIER.match(name):\n return str(name).lower()\n return hexlify(name.encode('utf-8')).decode()",
"def getuuid(data):\n if type(data) != bytes:\n data = data.encode('utf-8')\n h = hashlib.sha256(data).hexdigest()[:32].upper()\n for i, pos in enumerate([8, 12, 16, 20]):\n h = h[:i+pos] + '-' + h[i+pos:]\n return h",
"def generate_id():\n return str(hex(int(time.time() * 10 ** 7)))[5:]",
"def get_hex_id(fullRouterName):\n hexId = \"\"\n if fullRouterName.count(\"=\") > 0:\n hexId = fullRouterName.split(\"=\")[0]\n else:\n hexId = fullRouterName.split(\"~\")[0]\n hexId = hexId.replace(\"$\", \"\")\n return hexId",
"def ulid_hex() -> str:\n return f\"{int(time.time()*1000):012x}{getrandbits(80):020x}\"",
"def flat_uuid():\r\n return str(uuid.uuid4()).replace('-', '')",
"def create_hash_hex(self, vehicles):\n field = \"\"\n for i, vehicle in enumerate(vehicles):\n if vehicle.orientation == 'H':\n x = vehicle.x\n if x == 10:\n x = \"a\"\n elif x == 11:\n x = \"b\"\n field += str(x)\n else:\n y = vehicle.y\n if y == 10:\n y = \"a\"\n elif y == 11:\n y = \"b\"\n field += str(y)\n return field",
"def _uniquify_name(self, name):\n\n return name.ljust(32, \"-\")[:32] + uuid.uuid4().hex",
"def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')",
"def generate_event_uuid():\n return str(uuid.uuid4()).replace('-', '')",
"def serial_to_unique_id(serial: int) -> str:\n return hex(serial)[2:].zfill(8)",
"def make_unique_id(experiment):\n experiment.pop('Electrolyte')\n experiment.pop('Format')\n\n _id = []\n for value in experiment.values():\n value = str(value)\n value = value.lower()\n value = value.replace(',', '')\n _id.append('-'.join(value.split()))\n\n return '-'.join(_id)",
"def _generate_tracking_number(self):\n return uuid.uuid4().hex.upper()",
"def uniqid():\n return ''.join(\n random.choice(string.ascii_lowercase\n + string.ascii_uppercase\n + string.digits)\n for i in range(24)\n )",
"def _NewUUIDString ():\n if __HaveUUID:\n return uuid.uuid1().urn\n return '%s:%08.8x' % (time.strftime('%Y%m%d%H%M%S'), random.randint(0, 0xFFFFFFFF))",
"def make_trace_id(trace_id: bytes) -> str:\n return base64.b64encode(trace_id).decode(\"utf-8\")",
"def _get_unique_token() -> str:\n return f\"0x{int(secrets.token_hex(4), base=16):X}\"",
"def _make_uuid(val):\n h = hashlib.md5(val).hexdigest()\n return '{0}-{1}-{2}-{3}-{4}'.format(\n h[:8], h[8:12], h[12:16], h[16:20], h[20:])",
"def _comune_id(i):\n return str(i).zfill(6)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Raises a ValueError if matrix `value` is not square. | def assert_square(name: str, value: np.ndarray) -> None:
if not len(value.shape) == 2 or value.shape[0] != value.shape[1]:
raise ValueError(f"{name} must be a square") | [
"def _check_square(matrix):\n if matrix.ndim != 2 or (matrix.shape[0] != matrix.shape[-1]):\n raise ValueError(\n f\"Expected a square matrix, got array of shape {matrix.shape}.\"\n )",
"def test_change_basis_raises_not_square(self, fun):\n A = np.random.rand(4, 6)\n with pytest.raises(ValueError, match=\"The input matrix is not square\"):\n fun(A)",
"def test_valueError(self):\n self.assertRaises(ValueError, Square, -8, 7, 8)\n self.assertRaises(ValueError, Square, 2, -5, 9)\n self.assertRaises(ValueError, Square, 2, 5, -10)",
"def isSquareMatrix( m ):\n try:\n if m.shape[0]!=m.shape[1]:\n raise matrixFormatException( m.shape[0] , m.shape[1] )\n except matrixFormatException:\n print \"Error: The Bus Admittance Matrix must be a Square Matrix.\"\n return -1\n return int( m.shape[0] )",
"def valid_square(self, row, col, value):\n # Check that the row and col are valid puzzle indices\n if not ((0 <= row < self.sl) and (0 <= col < self.sl)):\n return False\n\n # Check that the square input is empty\n if self.puzzle[row][col] != 0:\n return False\n \n # Check that the value input is a valid puzzle value\n if not (1 <= value <= self.sl):\n if self.puzzle[row][col] == 0 and value == 0:\n return True\n return False\n \n # Check each row, column and block for same number\n for i in range(self.sl): \n if self.puzzle[row][i] == value: # Check each square in row for same value\n return False\n if self.puzzle[i][col] == value: # Check each square in col for same value\n return False\n \n # Check each square in box for same value, a little more complex index-wise\n r = self.bs*(row//self.bs) + (i//self.bs) \n c = self.bs*(col//self.bs) + (i%self.bs) \n if self.puzzle[r][c] == value:\n return False\n \n return True",
"def test_square_TypeError_width(self):\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n Square('string')\n Square(None)\n Square(True)\n Square((3, 4))\n Square(float('nan'))\n Square(float('inf'))",
"def validSquare(self, square):\n assert(isinstance(square[0], int) and isinstance(square[1], int))\n assert(square[0] >= 0 and square[1] >= 0)\n assert(square[1] < self.size and square[1] < self.size)",
"def _check_matrix_is_square(a: np.ndarray) -> bool:\n\n M, N = a.shape\n\n return M == N",
"def test_non_symmetric_matrix_raises(self):\n rng = np.random.RandomState(42)\n M = rng.randn(10, 10)\n with pytest.raises(ValueError) as raised_error:\n components_from_metric(M)\n assert str(raised_error.value) == \"The input metric should be symmetric.\"",
"def square(value):\n return value ** 2",
"def test_square_ValueError_height(self):\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n Square(-4)\n Square(0)\n Square(height=2)",
"def valid_matrix(matrix: Union[list, tuple]) -> None:\n if not isinstance(matrix, (list, tuple)):\n raise TypeError('it not matrix')\n for row in matrix:\n if not isinstance(row, (list, tuple)):\n raise TypeError('it not matrix')\n elif len(row) != len(matrix[0]):\n raise ValueError('rows have not same length')\n for elem in row:\n if not isinstance(elem, (int, float)):\n raise ValueError('matrix element not number')",
"def is_square(matrix):\n return is_matrix(matrix) and matrix.shape[0] == matrix.shape[1]",
"def checkSquare(self, row, column, number):\n if row in (0, 1, 2): rowrange = (0, 1, 2)\n if row in (3, 4, 5): rowrange = (3, 4, 5)\n if row in (6, 7, 8): rowrange = (6, 7, 8)\n if column in (0, 1, 2): colrange = (0, 1, 2)\n if column in (3, 4, 5): colrange = (3, 4, 5)\n if column in (6, 7, 8): colrange = (6, 7, 8)\n \n for y in rowrange:\n for x in colrange:\n if self.getNum(y, x) == number:\n return True\n return False",
"def test_badsizevaluefloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(float(1), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def test_set_cell_with_too_large_column(self):\n self.assertRaises(ValueError, self.sudoku.set_cell, (0, 9), 0)",
"def test_argument_number_square(self):\n with self.assertRaises(TypeError):\n s1 = Square()",
"def is_square(m):\n if not hasattr(m, '__len__'):\n return False\n\n is_flat_square_matrix = all(np.isscalar(c) for c in m) and np.sqrt(len(m)).is_integer()\n if is_flat_square_matrix:\n return True\n\n is_structed_square_matrix = all(len(row) == len(m) for row in m)\n return is_structed_square_matrix",
"def test_set_cell_with_too_large_row(self):\n self.assertRaises(ValueError, self.sudoku.set_cell, (9, 0), 0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the Shannon entropy for probabilities `ps` with `base`. | def shannon_entropy(ps: np.ndarray, base: int = 2) -> float:
return -np.sum(ps * np.log(ps) / np.log(base)) | [
"def calculate_normalized_entropy(probabilities: list, base: float) -> float:\n entropy = calculate_entropy(probabilities)\n logarithm_base = np.log(base)\n normalized_entropy = entropy / logarithm_base\n\n return normalized_entropy",
"def pssm_entropy_per_base( self ):\n return sum(\n [\n expected_entropy_under_dirichlet( self.omega[j + 1,:] )\n for j in xrange( self.K )\n ]\n ) / self.K",
"def entropy(pd):\n entp = 0\n for proba in pd:\n if proba == 0:\n continue\n entp -= proba*math.log(proba, 2)\n return entp",
"def entropy_py(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))",
"def entropy(aln, col, base):\n\n logging.info(\"Calculating the entropy at column {}\".format(col))\n\n freq = column_frequencies(aln, col)\n entropy = 0\n for key in freq:\n if freq[key] != 0:\n entropy -= freq[key]*math.log(freq[key], base)\n return entropy",
"def entropy(ps):\n single_dist = len(ps.shape) == 1\n if single_dist: ps = np.array([ps]);\n # H = -1 * np.sum(ps * np.log2(ps), axis=1)\n H = -1 * np.sum(np.multiply(ps, np.ma.log2(ps).filled(0)), axis=1)\n return H[0] if single_dist else H",
"def calculate_entropy(prob):\n return -(prob * math.log(prob,2))",
"def shannon_entropy(probs):\n return -(\n math.sum([px * math.log2(px) if px != 0 and not (np.isclose(px, 0)) else 0 for px in probs])\n )",
"def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))",
"def entropy(p):\n assert (p >= 0).all()\n assert abs(np.sum(p)-1) < 1e-6\n return -np.sum(p*np.log(p+1e-12))",
"def entropyIntIntegrand(p):\n return p * np.log(p))",
"def entropy(p):\n h = 0\n\n # TODO -- Calculate entropy value in nats for probability distribution `p`\n for x in p:\n h -= p[x] * math.log(p[x])\n\n return h",
"def entropy_coefficient(filter1, filter2, base=2):\n\n if (type(filter1) is NullField) or (type(filter2) is NullField):\n return 0\n\n total_count = int(filter1.bit_size)\n\n f1_element_count = filter1.filter.count(True)\n f2_element_count = filter2.filter.count(True)\n\n prob_f1 = f1_element_count / total_count\n prob_f2 = f1_element_count / total_count\n\n e_f1 = -1.0 * total_count * prob_f1 * math.log(prob_f1) / math.log(base)\n e_f2 = -1.0 * total_count * prob_f2 * math.log(prob_f2) / math.log(base)\n\n entropy = abs(e_f1 - e_f2)\n\n # for element_count in Counter(data).values():\n # p = element_count / total_count\n # entropy -= p * math.log(p, self.base)\n\n assert entropy >= 0\n\n return 1 - entropy",
"def entropy(probs):\n # have some checks for now. can remove when the code is more stable\n if np.min(probs) < 0.:\n raise ValueError(\"Probability cannot be < 0\")\n if np.max(probs) > 1.:\n raise ValueError(\"Probability cannot be > 1\")\n # assuming binary distribution, input probability is Pr(x = 1)\n eps = 1e-5 # to avoid np.log(0)\n probs = probs + eps\n return -probs * np.log(probs) - (1. - probs) * np.log(1. - probs)",
"def entropy(samples):\r\n counterList = classCounter(samples)\r\n probability = [x / sum(counterList) for x in counterList]\r\n return -sum([x * math.log2(x) for x in probability if x != 0])",
"def shannon_entropy(counts):\n freq = np.array(counts) * 1.0 / np.sum(counts)\n return -np.sum([f * np.log2(f) for f in freq if f != 0])",
"def base_entropy_masked(seq_list, base_set, base_idx):\n # entropy analysis\n base_list = [seq[base_idx] for seq in seq_list]\n freq_dict = Counter(base_list)\n mask_list = ['-', 'N']\n n_seq = sum([freq_dict[base] for base in freq_dict if base not in mask_list])\n H = 0\n total_masked = 0\n for base in freq_dict:\n if base in mask_list:\n total_masked += freq_dict[base]\n continue\n P = freq_dict[base]/n_seq\n H -= log2(P) * P\n masked_pct = total_masked/len(base_list)\n return H, masked_pct",
"def compute_shanon_entropy(sig):\n pd_series = pd.Series(sig)\n counts = pd_series.value_counts()\n entro = stats.entropy(counts, base=2) # shan_en = -sum(p(xi)*log(p(xi)))\n return entro",
"def ShannonEntropy(self,s):\n e = s[np.nonzero(s)]**2 * np.log(s[np.nonzero(s)]**2)\n return np.sum(e)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simply tests if `img` has 3 channels. | def is_rgb(img: np.ndarray) -> bool:
return len(img.shape) >= 1 and img.shape[-1] == 3 | [
"def is_rgb(im):\n return len(im.shape) == 3",
"def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False",
"def rgb(self) -> bool:\n return self.image_shape[2] == 3",
"def test_make_3channel():\n img_path = (\n Path(__file__).parent\n / 'images'\n / 'SEM_image_of_blood_cells.jpg'\n )\n grey_img = mpimg.imread(img_path)\n colour_img = td.make_3channel(grey_img)\n assert colour_img.shape == (grey_img.shape[0], grey_img.shape[1], 3)",
"def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True",
"def check_isrgb(im):\n \n im_sze = im.shape\n \n if len(im_sze) != 3:\n im = np.stack((im,) * 3, axis=-1)\n \n return im",
"def is3DImage(self):\n\t\treturn self.is3D",
"def check_niimg_3d(niimg, dtype=None):\n return check_niimg(niimg, ensure_ndim=3, dtype=dtype)",
"def has_alpha(im):\n return True if im.shape[-1] == 4 else False",
"def has_channels_equal(image: np.array) -> bool:\n first_channel = image[..., 0]\n return all(\n np.equal(image[..., channel_num], first_channel).all()\n for channel_num in range(1, image.shape[-1])\n )",
"def num_channels_in_image(img: torch.Tensor):\n if img is None or img.ndim < 2:\n raise ValueError('Invalid image data')\n if img.ndim == 2:\n return 1\n else:\n return img.shape[0]",
"def is_gray(img: np.ndarray):\n return len(img.shape) == 2 and img.shape[0] > 1 and img.shape[1] > 1",
"def is_grayscale(img):\n return len(img.shape) == GS",
"def is_cv3():\n (major, minor, _) = cv2.__version__.split('.')\n return int(major) == 3",
"def check_image_size(img_name, img_path):\n \n try:\n \n # Open image\n img = Image.open(img_name)\n \n # Determine size of image\n width, height = img.size\n \n # Check if image is square\n if (width==height):\n is_square = True\n else:\n is_square = False\n \n # Check for channels in image\n img_list = list(img.getdata())\n img_max = max(img_list)\n if (type(img_max)==int):\n is_single_channel = True\n else:\n is_single_channel = False\n \n return is_square, is_single_channel\n \n finally:\n \n # Close image\n img.close()",
"def _isGrayscale(self, img: ndarray) -> bool:\n if len(np.squeeze(img).shape) == 2:\n return True\n else:\n return False",
"def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False",
"def check_crop_size(image: np.array) -> bool:\n image_height, image_width = image.shape[:2]\n return image_height % 32 == 0 and image_width % 32 == 0",
"def check_niimg_3d(niimg):\n niimg = load_niimg(niimg)\n\n shape = niimg.shape\n if len(shape) == 3:\n pass\n elif (len(shape) == 4 and shape[3] == 1):\n # \"squeeze\" the image.\n data = _safe_get_data(niimg)\n affine = niimg.get_affine()\n niimg = new_img_like(niimg, data[:, :, :, 0], affine)\n else:\n raise TypeError(\"A 3D image is expected, but an image \"\n \"with a shape of %s was given.\" % (shape, ))\n\n return niimg"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts an array [..., channels] of RGB values to HSI color values (H in rad). RGB values are assumed to be normalized to (0, 1). | def rgb_to_hsi(image: np.ndarray) -> np.ndarray:
if not is_rgb(image):
raise ValueError("Input needs to be an array of RGB values")
r = image[..., 0]
g = image[..., 1]
b = image[..., 2]
out = np.zeros_like(image)
# allequal = (img == img[:, :, 0, np.newaxis]).all(axis=-1)
with np.errstate(invalid="ignore"):
tmp = (2.0 * r - g - b) / 2.0 / np.sqrt((r - g) ** 2 + (r - b) * (g - b)) # if r==g==b then 0/0
theta = np.arccos(np.clip(tmp, -1.0, +1.0))
out[..., 0] = np.where(b <= g, theta, 2 * np.pi - theta) # H
out[..., 2] = np.sum(image, axis=-1) / 3.0 # I
out[..., 1] = 1 - np.amin(image, axis=-1) / out[..., 2] # S if r==g==b==0 then 0/0
np.nan_to_num(out[..., 0:2], copy=False)
return out | [
"def hc2hhsi(hc):\n\n import numpy as np\n\n ####################################################################################################################\n # Calculate the components c\n rows = hc.shape[0]\n cols = hc.shape[1]\n dims = hc.shape[2]\n\n c = np.zeros((rows, cols, dims-1))\n for i in range(dims - 1):\n nonZeroEle = dims - i # nonZeroEle is the number of non-zero elements of the base unit vector u1, u2, ...\n c[:, :, i] = (nonZeroEle - 1) ** 0.5 / nonZeroEle ** 0.5 * hc[:, :, i] \\\n - 1 / ((nonZeroEle - 1) ** 0.5 * nonZeroEle ** 0.5) * np.sum(hc[:, :, i+1:dims], axis=2)\n ####################################################################################################################\n\n # Normalise the norms of c to 1 to obtain hyper-hue hh.\n c_norm = np.sum(c ** 2, axis=2) ** 0.5\n c_norm = c_norm + (c_norm == 0) * 1e-10\n c_norm = np.tile(c_norm, (dims - 1, 1, 1))\n c_norm = np.moveaxis(c_norm, 0, -1)\n hh = c / c_norm # add 1e-10 to avoid zero denominators\n\n # Saturation\n s = hc.max(2) - hc.min(2)\n # s = np.amax(hc, axis=2) - np.amin(hc, axis=2) # The same as above\n\n # Intensity\n i = 1/dims * np.sum(hc, 2)\n\n return hh, s, i",
"def filter_rgb_to_hed(np_img, output_type=\"uint8\"):\n t = Time()\n hed = sk_color.rgb2hed(np_img)\n if output_type == \"float\":\n hed = sk_exposure.rescale_intensity(hed, out_range=(0.0, 1.0))\n else:\n hed = (sk_exposure.rescale_intensity(hed, out_range=(0, 255))).astype(\"uint8\")\n\n util.np_info(hed, \"RGB to HED\", t.elapsed())\n return hed",
"def rgb2hsl(rgb_image):\n assert len(rgb_image.shape) == 3, \"Image array not 3D!\"\n assert rgb_image.shape[2], \"Last dimension must have length 3! (RGB)\"\n\n # Use OpenCV to convert RGB image to HLS\n output = cv2.cvtColor(rgb_image.astype(np.float32), cv2.COLOR_RGB2HLS)\n\n # Flip channels: HLS -> HSL\n output[:, :, 1], output[:, :, 2] = (\n output[:, :, 2].copy(),\n output[:, :, 1].copy(),\n )\n\n # Normalize H range: 0-360 -> 0-1\n output[:, :, 0] /= 360.0\n\n # Return HSL image\n return output",
"def RGB2HSL(R,G,B): \n var_Min = min( R, G, B ) \n var_Max = max( R, G, B ) \n del_Max = var_Max - var_Min \n L = ( var_Max + var_Min ) / 2\n if del_Max == 0: \n H = 0 \n S = 0\n else:\n if L < .5:\n S = del_Max / ( var_Max + var_Min )\n else:\n S = del_Max / ( 2. - var_Max - var_Min )\n del_R = ( ( ( var_Max - R ) / 6. ) + ( del_Max / 2. ) ) / del_Max\n del_G = ( ( ( var_Max - G ) / 6. ) + ( del_Max / 2. ) ) / del_Max\n del_B = ( ( ( var_Max - B ) / 6. ) + ( del_Max / 2. ) ) / del_Max\n if R == var_Max:\n H = del_B - del_G\n elif G == var_Max:\n H = ( 1 / 3. ) + del_R - del_B\n elif B == var_Max:\n H = ( 2 / 3. ) + del_G - del_R\n if H < 0:\n H += 1\n if H > 1:\n H -= 1\n return [H, S, L]",
"def rgb8_to_hsl(rgb):\n HUE_MAX = 6\n\n r = rgb[0] / 255.0\n g = rgb[1] / 255.0\n b = rgb[2] / 255.0\n\n cmin = min(r, g, b)\n cmax = max(r, g, b)\n delta = cmax - cmin\n h = 0\n s = 0\n l = (cmax + cmin)\n\n if delta != 0:\n if l < 0.5:\n s = delta / l\n else:\n s = delta / (2 - l)\n\n if r == cmax:\n h = (g - b) / delta\n elif g == cmax:\n h = 2 + (b - r) / delta\n elif b == cmax:\n h = 4 + (r - g) / delta\n\n return h, s, l",
"def rgb_to_hsv(x):\n # separating channels\n R = x[:,:,0]\n G = x[:,:,1]\n B = x[:,:,2]\n \n \n # h, s, v = hue, saturation, value \n # initial arrays for h, s and v filled with 0.0\n # we take R array just as 2D sample for copying the shape\n H = np.full_like(R, 0.0, dtype=np.double)\n S = np.full_like(R, 0.0, dtype=np.double)\n V = np.full_like(R, 0.0, dtype=np.double)\n \n HSV = np.full_like(x, 0.0, dtype=np.double)\n \n # np.max/min and axis=2 creates a 2D matrix\n C_max = np.max(x, axis=2) # maximum of r, g, b \n C_min = np.min(x, axis=2) # minimum of r, g, b \n Diff = C_max - C_min # diff of cmax and cmin. \n \n # Formula:\n # https://www.geeksforgeeks.org/program-change-rgb-color-model-hsv-color-model/\n \n # if cmax and cmax are equal (R=G=B) then h = 0 \n H[np.isclose(C_max, R, 0.0001)] = 0 \n \n # if cmax equal r \n m = np.isclose(C_max, R, 0.0001)&(Diff!=0)\n H[m] = (60 * ((G[m] - B[m]) / Diff[m]) + 360) % 360\n \n\n # if cmax equal g \n m = np.isclose(C_max, G, 0.0001)&(Diff!=0)\n H[m] = (60 * ((B[m] - R[m]) / Diff[m]) + 120) % 360\n \n # if cmax equal b \n m = np.isclose(C_max, B, 0.0001)&(Diff!=0)\n H[m] = (60 * ((R[m] - G[m]) / Diff[m]) + 240) % 360\n \n # if cmax equal zero \n S[C_max == 0] = 0\n \n # else\n m = (C_max != 0)\n S[m] = (Diff[m] / C_max[m])\n \n # compute v \n V = C_max\n \n # building new 3D picture\n HSV[:,:,0] = H\n HSV[:,:,1] = S\n HSV[:,:,2] = V\n \n return HSV",
"def HSL2RGB(H,S,L):\n if S == 0: \n R = L\n G = L\n B = L\n else:\n if L < 0.5 :\n var_2 = L * ( 1 + S )\n else:\n var_2 = ( L + S ) - ( S * L )\n var_1 = 2 * L - var_2\n R = Hue2RGB( var_1, var_2, H + ( 1 / 3. ) )\n G = Hue2RGB( var_1, var_2, H )\n B = Hue2RGB( var_1, var_2, H - ( 1 / 3. ) )\n return [R,G,B]",
"def to_hsv(self) -> HSV_Array_Float:\n return colorsys.rgb_to_hsv(*self.to_rgb())",
"def rgbToHsv ( r, g = 0.0, b = 0.0 ):\n\n # Check if argument is list\n if isinstance(r, list):\n g = r[1]\n b = r[2]\n r = r[0]\n\n _max = max( r, g, b )\n _min = min( r, g, b )\n v = _max\n d = _max - _min;\n s = 0.0 if _max == 0.0 else d / _max\n\n if _max == _min:\n h = 0.0 # achromatic\n else:\n if _max == r:\n h = (g - b) / d + ( 6.0 if g < b else 0.0 )\n elif _max == g:\n h = (b - r) / d + 2.0\n elif _max == b:\n h = (r - g) / d + 4.0\n h /= 6.0\n return [ h, s, v ]",
"def bgr2hsv(x: np.ndarray) -> np.ndarray:\n return cv2.cvtColor(x, cv2.COLOR_BGR2HSV)",
"def rgb_to_hsb(r, g, b):\n h, s, v = 0, 0, max(r, g, b)\n d = v - min(r, g, b)\n if v != 0:\n s = d / float(v)\n if s != 0:\n if r == v: h = 0 + (g-b) / d\n elif g == v: h = 2 + (b-r) / d\n else : h = 4 + (r-g) / d\n h = h / 6.0 % 1\n return h, s, v",
"def hslToRgb ( h, s = 0.0, l = 0.0, a = 1.0 ):\n\n # Check if argument is list\n if isinstance(h, list):\n s = h[1]\n l = h[2]\n h = h[0]\n\n if isinstance(h, int):\n h /= 360.0\n if isinstance(s, int):\n s /= 100.0\n if isinstance(l, int):\n l /= 100.0\n\n r = l\n g = l\n b = l\n v = l * ( 1.0 + s ) if l <= 0.5 else l + s - l * s\n if ( v > 0 ):\n m = l + l - v\n sv = ( v - m ) / v\n h *= 6.0\n sextant = int( math.floor( h ) )\n fract = h - sextant\n vsf = v * sv * fract\n mid1 = m + vsf\n mid2 = v - vsf\n\n # Switch sextant\n if sextant == 0:\n r = v\n g = mid1\n b = m\n elif sextant == 1:\n r = mid2\n g = v\n b = m\n elif sextant == 2:\n r = m\n g = v\n b = mid1\n elif sextant == 3:\n r = m\n g = mid2\n b = v\n elif sextant == 4:\n r = mid1\n g = m\n b = v\n elif sextant == 5:\n r = v\n g = m\n b = mid2\n\n return [ r, g, b ]",
"def rgb2hed(rgb):\n return separate_stains(rgb, hed_from_rgb)",
"def hsv2rgb(hsv_Nx3):\r\n #based on method in http://en.wikipedia.org/wiki/HSL_and_HSV#Converting_to_RGB\r\n hsv_Nx3 = numpy.asarray(hsv_Nx3, dtype=float)\r\n #we expect a 2D array so convert there if needed\r\n origShape = hsv_Nx3.shape\r\n hsv_Nx3 = hsv_Nx3.reshape([-1,3])\r\n\r\n H_ = (hsv_Nx3[:,0]%360)/60.0 #this is H' in the wikipedia version\r\n C = hsv_Nx3[:,1]*hsv_Nx3[:,2] #multiply H and V to give chroma (color intensity)\r\n X = C*(1-abs(H_%2-1))\r\n\r\n #rgb starts\r\n rgb=hsv_Nx3*0#only need to change things that are no longer zero\r\n II = (0<=H_)*(H_<1)\r\n rgb[II,0]=C[II]\r\n rgb[II,1]=X[II]\r\n II = (1<=H_)*(H_<2)\r\n rgb[II,0]=X[II]\r\n rgb[II,1]=C[II]\r\n II = (2<=H_)*(H_<3)\r\n rgb[II,1]=C[II]\r\n rgb[II,2]=X[II]\r\n II = (3<=H_)*(H_<4)\r\n rgb[II,1]=X[II]\r\n rgb[II,2]=C[II]\r\n II = (4<=H_)*(H_<5)\r\n rgb[II,0]=X[II]\r\n rgb[II,2]=C[II]\r\n II = (5<=H_)*(H_<6)\r\n rgb[II,0]=C[II]\r\n rgb[II,2]=X[II]\r\n m=(hsv_Nx3[:,2] - C)\r\n rgb += m.reshape([len(m),1])# V-C is sometimes called m\r\n return rgb.reshape(origShape)*2-1",
"def hsv(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)",
"def filter_rgb_to_hsv(np_img, display_np_info=True):\n\n if display_np_info:\n t = Time()\n hsv = sk_color.rgb2hsv(np_img)\n if display_np_info:\n util.np_info(hsv, \"RGB to HSV\", t.elapsed())\n return hsv",
"def hsvToRgb ( h, s = 0.0, v = 0.0 ):\n # Check if first argument is list\n if isinstance(h, list):\n s = h[1]\n v = h[2]\n h = h[0]\n if isinstance( h, int ):\n h /= 360.0\n if isinstance( s, int ):\n s /= 100.0\n if isinstance( v, int ):\n v /= 100.0\n\n if v == 0.0:\n return [0, 0, 0]\n\n h = h * 6.0\n i = int( math.floor( h ) )\n\n f = h - i\n p = v * ( 1.0 - s )\n q = v * ( 1.0 - ( s * f ) )\n t = v * ( 1.0 - ( s * ( 1.0 - f ) ) )\n\n if i == 0:\n r = v\n g = t\n b = p\n elif i == 1:\n r = q\n g = v\n b = p\n elif i == 2:\n r = p\n g = v\n b = t\n elif i == 3:\n r = p\n g = q\n b = v\n elif i == 4:\n r = t\n g = p\n b = v\n elif i == 5:\n r = v\n g = p\n b = q\n # To return int\n # r = int( math.floor( r * 255 ) )\n # g = int( math.floor( g * 255 ) )\n # b = int( math.floor( b * 255 ) )\n\n return [ r, g, b ]",
"def RGBtoHSL( rgb ):\n # R' = R/255 (G' = G/255, B' = B/255)\n Rp = rgb[2]/255\n Gp = rgb[1]/255\n Bp = rgb[0]/255\n Cmax = max(Rp,Gp,Bp)\n Cmin = min(Rp,Gp,Bp)\n Delta = Cmax - Cmin\n if Delta == 0:\n Hue = 0\n elif Cmax == Rp:\n Hue = 60*(((Gp-Bp)/Delta)%6)\n elif Cmax == Gp:\n Hue = 60*((Bp-Rp)/Delta + 2)\n else:\n Hue = 60*((Rp-Gp)/Delta + 4)\n\n Lit = (Cmax+Cmin)/2\n\n if Delta == 0:\n Sat = 0\n else:\n Sat = Delta/(1-abs(2*Lit-1))\n #print(\"H:\",Hue,\"S:\",Sat,\"L:\",Lit)\n return (Hue,Sat,Lit)",
"def _hls2rgb(self,h):\n h=h**self.exponent\n if(self.invert): h=1.0-h\n h=h*360.0\n h=Numeric.fmod(h,360.0)\n if(self.hls_hls):\n h=h/60.0\n else:\n if(h<120):\n h=h/120.0 # /* 0..1 Rot..(Orange)..Gelb */\n elif(h<180):\n h=h/60.0 - 1.0 # /* 1..2 Gelb..Gruen */\n elif(h<240):\n h=h/30.0 - 4.0 # /* 2..4 Gruen..Blaugruen..Blau*/\n else:\n h=h/60.0 # /* 4..6 Blau..Purpur..Rot */\n c=int(h)\n frac=h-c\n if (self.hls_l<=0.5):\n maxi=self.hls_l*(1.0+self.hls_s)\n else:\n maxi=self.hls_l+self.hls_s-self.hls_l*self.hls_s\n mini=2*self.hls_l-maxi;\n diff=maxi-mini;\n if(self.hls_s==0): # /* grau */\n return(1.0,1.0,1.0) \n else:\n if(c==0):\n return(maxi,mini+frac*diff,mini)\n elif(c==1):\n return(mini+(1.0-frac)*diff,maxi,mini)\n elif(c==2):\n return(mini,maxi,mini+frac*diff)\n elif(c==3):\n return(mini,mini+(1.0-frac)*diff,maxi)\n elif(c==4):\n return(mini+frac*diff,mini,maxi)\n else:\n return(maxi,mini,mini+(1.0-frac)*diff)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts an array [..., channels] of RGB values to Digital Y'CbCr (0-255). RGB values are assumed to be normalized to (0, 1). Don't forget to cast to uint8 for pillow. | def rgb_to_ycbcr(image: np.ndarray) -> np.ndarray:
""" from RGB (0-1).
"""
if not is_rgb(image):
raise ValueError("Input needs to be an array of RGB values")
m = np.array(
[
[+065.481, +128.553, +024.966],
[-037.797, -074.203, +112.000],
[+112.000, -093.786, -018.214],
]
)
a = np.array([16, 128, 128])
return np.dot(image, m.T) + a | [
"def to_YCrCb(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)",
"def ycbcr2rgb(im):\n xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])\n rgb = im.astype(np.float)\n rgb[:, :, [1, 2]] -= 128\n rgb = rgb.dot(xform.T)\n np.putmask(rgb, rgb > 255, 255)\n np.putmask(rgb, rgb < 0, 0)\n return np.uint8(rgb)",
"def rgb2ycbcr(img, y_only=True):\n img.astype(np.float32)\n if y_only:\n rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0\n return rlt",
"def RGB2YCrCb(rgb_image):\n\n R = rgb_image[:, 0:1]\n G = rgb_image[:, 1:2]\n B = rgb_image[:, 2:3]\n Y = 0.299 * R + 0.587 * G + 0.114 * B\n Cr = (R - Y) * 0.713 + 0.5\n Cb = (B - Y) * 0.564 + 0.5\n\n Y = Y.clamp(0.0,1.0)\n Cr = Cr.clamp(0.0,1.0).detach()\n Cb = Cb.clamp(0.0,1.0).detach()\n return Y, Cb, Cr",
"def rgb_to_ycbcr(image: torch.Tensor) -> torch.Tensor:\n r: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n b: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b\n cb: torch.Tensor = (b - y) * 0.564 + delta\n cr: torch.Tensor = (r - y) * 0.713 + delta\n return torch.stack([y, cb, cr], -3)",
"def ycbcr_to_rgb(image: torch.Tensor) -> torch.Tensor:\n y: torch.Tensor = image[..., 0, :, :]\n cb: torch.Tensor = image[..., 1, :, :]\n cr: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n cb_shifted: torch.Tensor = cb - delta\n cr_shifted: torch.Tensor = cr - delta\n\n r: torch.Tensor = y + 1.403 * cr_shifted\n g: torch.Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n b: torch.Tensor = y + 1.773 * cb_shifted\n return torch.stack([r, g, b], -3)",
"def yuv2rgb(im):\n ## conflicting definitions exist depending on whether you use the full range\n ## of YCbCr or clamp out to the valid range. see here\n ## http://www.equasys.de/colorconversion.html\n ## http://www.fourcc.org/fccyvrgb.php\n from numpy import dot, ndarray, array\n # if not im.dtype == 'uint8':\n # raise ImageUtilsError('yuv2rgb only implemented for uint8 arrays')\n\n ## better clip input to the valid range just to be on the safe side\n yuv = ndarray(im.shape) ## float64\n yuv[:, :, 0] = im[:, :, 0].clip(16, 235).astype(yuv.dtype) - 16\n yuv[:, :, 1:] = im[:, :, 1:].clip(16, 240).astype(yuv.dtype) - 128\n\n ## ITU-R BT.601 version (SDTV)\n A = array([[1., 0., 0.701],\n [1., -0.886 * 0.114 / 0.587, -0.701 * 0.299 / 0.587],\n [1., 0.886, 0.]])\n A[:, 0] *= 255. / 219.\n A[:, 1:] *= 255. / 112.\n\n ## ITU-R BT.709 version (HDTV)\n # A = array([[1.164, 0., 1.793],\n # [1.164, -0.213, -0.533],\n # [1.164, 2.112, 0.]])\n\n rgb = dot(yuv, A.T)\n return rgb.clip(0, 255).astype('uint8')",
"def bgr2yuv422(im):\r\n\r\n # Avoid overflows in average calculations.\r\n im = im.astype(np.uint16)\r\n\r\n # Initialise the new image.\r\n imShape = im.shape\r\n converted = np.zeros((imShape[0], int(imShape[1]/2), 4))\r\n \r\n # Perform the conversion calculations.\r\n converted[:,:,0] = (0.2126*im[:,0:imShape[1]:2,2] + \r\n 0.7152*im[:,0:imShape[1]:2,1] + \r\n 0.0722*im[:,0:imShape[1]:2,0]) * (219.0/256.0) + 16.0\r\n converted[:,:,2] = (0.2126*im[:,1:imShape[1]:2,2] + \r\n 0.7152*im[:,1:imShape[1]:2,1] + \r\n 0.0722*im[:,1:imShape[1]:2,0]) * (219.0/256.0) + 16.0\r\n #print((((converted[:,:,0] + converted[:,:,2])/2.0)))\r\n converted[:,:,1] = (((im[:,0:imShape[1]:2,0]+im[:,1:imShape[1]:2,0])/2.0) -\\\r\n ((converted[:,:,0] + converted[:,:,2])/2.0)) * 0.5389 *\\\r\n (224.0/256.0) + 128\r\n converted[:,:,3] = (((im[:,0:imShape[1]:2,2]+im[:,1:imShape[1]:2,2])/2.0) -\\\r\n ((converted[:,:,0] + converted[:,:,2])/2.0)) * 0.635 *\\\r\n (224.0/256.0) + 128\r\n #print(converted.astype(np.uint8))\r\n # Return the converted image.\r\n return converted.astype(np.uint8)",
"def chroma_rgb(yuvArray):\n rgbArray = np.empty_like(yuvArray, np.uint8)\n for x, line in enumerate(yuvArray):\n for y, yuv in enumerate(line):\n R, G, B = YUV2RGB(yuv)\n if R > 255:\n R = 255\n if G > 255:\n G = 255\n if B > 255:\n B = 255\n if R < 0:\n R = 0\n if G < 0:\n G = 1\n if B < 0:\n B = 0\n yuv = (round(R), round(G), round(B))\n rgbArray[x][y] = yuv\n return rgbArray",
"def _rgb2y(self, im):\n if len(im.shape) < 3:\n return im\n return np.sum(im * [0.299, 0.587, 0.114], axis=2)",
"def bgr2rgb(x: np.ndarray) -> np.ndarray:\n return cv2.cvtColor(x, cv2.COLOR_BGR2RGB)",
"def bgr_to_yuv_channels(matrix):\n yuv_matrix = cv2.cvtColor(matrix, cv2.COLOR_BGR2YUV)\n return cv2.split(yuv_matrix)",
"def trans_bgr2rgb_seq(frames):\n return [bgr2rgb(frame) for frame in frames]",
"def RGB2BGR(x):\n out = cv2.cvtColor(x, cv2.COLOR_RGB2BGR)\n return out",
"def yuv_channels_to_bgr_image(y_channel, u_channel, v_channel):\n yuv_image = cv2.merge((y_channel.astype(np.float32), u_channel.astype(np.float32), v_channel.astype(np.float32)))\n bgr_image = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)\n return bgr_image",
"def couleur01(r,g,b):\n return [r/255, g/255, b/255]",
"def rgb_to_rgb8():\n def map_component(vv, v):\n val = (vv * 2) + v\n if (val >= 3):\n return 255\n if (val == 2):\n return 120\n if (val == 1):\n return 47\n return 0\n\n vals = []\n for rr in xrange(0, 2):\n for gg in xrange(0, 2):\n for bb in xrange(0, 2):\n for r in xrange(0, 2):\n for g in xrange(0, 2):\n for b in xrange(0, 2):\n red = map_component(rr, r)\n green = map_component(gg, g)\n blue = map_component(bb, b)\n\n vals.append((red, green, blue))\n return vals",
"def yiq2rgb(imYIQ):\r\n\r\n return np.dot(imYIQ, (np.linalg.inv(CONSTANT_MATRIX)).T)",
"def skin_detect_ycbcr(frame):\n Cr_min, Cr_max, Cb_min, Cb_max = 133, 150, 77, 127\n # Constants for finding range of skin color in YCrCb\n min_YCrCb = np.array([0,Cr_min,Cb_min], np.uint8)\n max_YCrCb = np.array([255,Cr_max,Cb_max], np.uint8)\n\n # Convert image to YCrCb\n imageYCrCb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)\n # Find region with skin tone in YCrCb image\n skinRegion = cv2.inRange(imageYCrCb, min_YCrCb, max_YCrCb) \n # Do contour detection on skin region\n _, contours, hierarchy = cv2.findContours(skinRegion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n return imageYCrCb, contours, hierarchy"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a triangular matrix with random values drawn uniformly between 0 and 1. | def random_triangular_matrix(size: int, lower: bool = True) -> np.ndarray:
a = np.random.uniform(0, 1, (size, size))
if lower:
        ind = np.triu_indices(size, 1)
else:
        ind = np.tril_indices(size, -1)
a[ind] = 0
return a | [
"def triangular_distribution(p1, p2, p3):\n return random.triangular(p1, p2, p3)",
"def random_matrix(n):\n return [[random() for j in range(n)] for i in range(n)]",
"def uniformRandomRotation():\r\n q, r = np.linalg.qr(np.random.normal(size=(3, 3)))\r\n M = np.dot(q, np.diag(np.sign(np.diag(r))))\r\n if np.linalg.det(M) < 0: # Fixing the flipping\r\n M[:, 0] = -M[:, 0] # det(M)=1\r\n return M",
"def generate_matrix(dim, var):\n m = np.random.normal(0, math.sqrt(var), (dim, dim))\n a = (m + np.transpose(m)) / 2\n return a",
"def sample_matrix(dim, bound):\n return np.random.uniform(low=-bound, high=bound, size=(dim, dim))",
"def _generate_random_matrix(self):\n return np.random.random_integers(-5, 5, (self.dimension, self.dimension))",
"def generate_random_matrix(n):\n return [[random.randint(1, 50) for i in range(n)] for j in range(n)]",
"def generate_matrix(size) -> np.ndarray:\n np.random.seed(1)\n return np.random.rand(size, size) - 0.5",
"def randmatrix(m, n, lower=-0.5, upper=0.5):\n return np.array([random.uniform(lower, upper) for i in range(m*n)]).reshape(m, n)",
"def generate_matrix(rows, cols):\n matrix_random = np.random.rand(rows, cols)\n return matrix_random",
"def generate_matrix(rand_range,n_value):\n matrix = np.empty([n_value,n_value])\n for x in range(rand_range):\n for y in range(rand_range):\n matrix[x][y] = rand.randrange(1, rand_range+1) % n_value+1\n\n return matrix",
"def random(dim):\n return np.random.uniform([-1]*dim, [1]*dim)",
"def gen_mat(n, dense):\n z = np.zeros((n,n))\n for row in range(n):\n r = []\n total = 0\n for col in range(n):\n if z[row,col] == 0:\n r.append(col)\n else:\n total +=1\n random.shuffle(r)\n need = int((n* dense) - total )\n for col in r[:need]:\n z[row, col] = 1\n\n for col in range(n):\n r = []\n total = 0\n for row in range(n):\n if z[row,col] == 0:\n r.append(row)\n else:\n total +=1\n random.shuffle(r)\n need = int((n* dense) - total )\n for row in r[:need]:\n z[row, col] = 1\n return z",
"def init_rand_matrix(self, nrow, ncol, seed=None):\n if not seed:\n seed = np.random.randint(1000)\n np.random.seed(seed)\n return(np.random.dirichlet(np.ones(nrow), size=ncol).T)",
"def random_unit_vec(dim):\n import numpy as np\n import random as r\n\n r.seed()\n vec = np.zeros(dim)\n for d in range(dim):\n vec[d] = r.uniform(-1.0,1.0)\n\n vec = 1.0/np.linalg.norm(vec)*vec\n\n return vec;",
"def random_element(self):\n from sage.combinat.gelfand_tsetlin_patterns import GelfandTsetlinPatterns\n n = self._n\n toprow = [n-i for i in range(n)]\n gt = GelfandTsetlinPatterns(top_row = toprow, strict = True)\n randomgt = gt.random_element()\n A = AlternatingSignMatrices(n)\n return A.from_monotone_triangle(randomgt)",
"def gen_diagonal_matrix(size):\n matrix = np.zeros(shape=(size, size), dtype=np.float128)\n for i in range(0, size):\n for j in range(0, size):\n if i == j:\n matrix[i][j] = uniform(-size, size)\n vector_x = MatrixGenerator.gen_vector(size)\n vector_b = np.dot(matrix, vector_x).reshape(size, 1)\n return matrix, vector_x, vector_b",
"def random_matrix(n):\n from random import shuffle\n\n a = list(range(n + 1))\n shuffle(a)\n\n # Use slicing to left rotate\n m = [a[i:] + a[:i] for i in range(n + 1)]\n\n # Shuffle rows in matrix\n shuffle(m)\n\n # Shuffle cols in matrix (optional)\n m = list(map(list, zip(*m))) # Transpose the matrix\n shuffle(m)\n\n return m",
"def rand_unitary(N):\n U = unitary_group.rvs(N)\n U_real = th.tensor(U.real)\n U_imag = th.tensor(U.imag)\n return make_complex_matrix(U_real, U_imag)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
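A minimal usage sketch for the `random_triangular_matrix` document above (assumes the function is defined as shown, with `size` passed to the index helpers; the seed and matrix size are illustrative):

```python
import numpy as np

np.random.seed(0)  # illustrative, only for reproducibility
L = random_triangular_matrix(4, lower=True)   # strictly upper triangle is zeroed
U = random_triangular_matrix(4, lower=False)  # strictly lower triangle is zeroed
print(np.allclose(L, np.tril(L)), np.allclose(U, np.triu(U)))  # True True
```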
Performs batched calculation of `v^T A v` transform. Special case of bilinear form `x^T A y` | def batch_vTAv(A: np.ndarray, v: np.ndarray) -> np.ndarray:
""" Faster than
Av = np.matmul(A, v[...,:,None]) # [B, X, 1]
return np.matmul(v[...,None,:], Av).squeeze((-2, -1)) # [B]
"""
return np.einsum("...k,...kl,...l->...", v, A, v) | [
"def brute_multiply(x, y):\n \n n = x.shape[0]\n res = np.zeros(x.shape)\n \n for i in range(n):\n for j in range(n):\n for k in range(n):\n res[i, j] += x[i, k] * y[k, j]\n \n return res",
"def Transform(B, A, T, nullFill = True): \n invT = np.linalg.inv(T) ## TODO change to own inverse transform\n \n if not nullFill:\n tImg = np.zeros(A.Values.shape, dtype=float) # TODO how should it be initilized\n else: \n tImg = np.full(A.Values.shape, None, dtype=float) # TODO how should it be initilized \n (minX, minY, maxX, maxY) = getExtremeValues(B)\n for i in range(tImg.shape[0]):\n for j in range(tImg.shape[1]):\n x = i * A.Spacing[0] + A.Origin[0]\n y = j * A.Spacing[1] + A.Origin[1]\n pos = np.dot(invT, np.array([x,y,1], dtype=float, copy=True))\n #print(\"x: {0}, y:{1}, T: {2}\".format(x,y, invT))\n #print(pos)\n if not (minX <= pos[0] <= maxX and minY <= pos[1] <= maxY): \n continue \n val = BilinearInterpolate(pos[0], pos[1], B)\n tImg[i,j] = val\n return Image(tImg, A.Spacing, A.Origin)",
"def temporal_affine_forward(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n cache = x, w, b, out\n return out, cache",
"def ulab_bilinear_interpolation():\n GRID_DATA[1::2, ::2] = SENSOR_DATA[:-1, :]\n GRID_DATA[1::2, ::2] += SENSOR_DATA[1:, :]\n GRID_DATA[1::2, ::2] /= 2\n GRID_DATA[::, 1::2] = GRID_DATA[::, :-1:2]\n GRID_DATA[::, 1::2] += GRID_DATA[::, 2::2]\n GRID_DATA[::, 1::2] /= 2",
"def inefficient_outer(x, y):\n result = np.zeros((len(x), len(y))) \n for i in range(len(x)):\n for j in range(len(y)):\n result[i, j] = x[i]*y[j]\n \n return result",
"def trilinear(T,W1,W2,W3):\r\n def matrix(W):\r\n if len(W.shape)==1:\r\n return np.matrix(W).T\r\n else:\r\n return W\r\n W1,W2,W3 = matrix(W1),matrix(W2),matrix(W3)\r\n N1 = xrange(W1.shape[1])\r\n N2 = xrange(W2.shape[1])\r\n N3 = xrange(W3.shape[1])\r\n NT = xrange(W1.shape[0])\r\n\r\n X3 = np.zeros((W1.shape[1], W2.shape[1], W3.shape[1]))\r\n # TODO: figure out the equivalent numpy routines\r\n if type(T) is str:\r\n if T == 'I':\r\n for (i1,i2,i3,j) in itertools.product(N1,N2,N3,NT):\r\n X3[i1,i2,i3] += W1[j,i1] * W2[j,i2] * W3[j,i3]\r\n else:\r\n for (i1,i2,i3,j1,j2,j3) in itertools.product(N1,N2,N3,NT,NT,NT):\r\n X3[i1,i2,i3] += T[j1,j2,j3] * W1[j1,i1] * W2[j2,i2] * W3[j3,i3]\r\n return X3",
"def apply(self,i,x):\n #applies the ith map to the point x\n y = self.A[i,:,:] @ x + self.b[i,:]\n return y",
"def batched_dot(\n a: torch.FloatTensor,\n b: torch.FloatTensor,\n) -> torch.FloatTensor:\n return _batched_dot_manual(a, b)",
"def advect (u, v):\r\n # NOTICE: memory usage might be too high, could optimize\r\n\r\n # Store the values from timestep n\r\n un = u\r\n vn = v\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n oldpos = coord (i,j) - dt * np.stack((u[i,j], v[i,j]))\r\n u[i,j], v[i,j] = interpolate (un, vn, oldpos)\r\n\r\n\r\n # Return values for timestep n+1\r\n return u, v",
"def ResolTriSup(T,b):\r\n\r\n\r\n n,m=T.shape\r\n x=np.zeros(n)\r\n for i in range(n-1,-1,-1):\r\n S=T[i,i+1:]@x[i+1:]\r\n x[i]=(b[i]-S)/T[i,i]\r\n x=np.reshape(x,b.shape)\r\n return x",
"def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out",
"def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3",
"def _apply_cost_to_vec(\n self, vec: jnp.ndarray, axis: int = 0, fn=None\n ) -> jnp.ndarray:\n vec = jnp.reshape(vec, self.grid_size)\n accum_vec = jnp.zeros_like(vec)\n indices = list(range(1, self.grid_dimension))\n for dimension, geom in enumerate(self.geometries):\n cost = geom.cost_matrix\n ind = indices.copy()\n ind.insert(dimension, 0)\n if axis == 0:\n cost = cost.T\n accum_vec += jnp.sum(\n jnp.tensordot(cost, vec, axes=([0], [dimension])),\n axis=indices,\n keepdims=True\n ).transpose(ind)\n return accum_vec.ravel()",
"def linearBC(a0, a1, i):\n return lambda x, t: a0 + a1*x[i]",
"def affine_forward(x, w, b):\n num_train = x.shape[0]\n x_flatten = x.reshape((num_train, -1))\n out = np.dot(x_flatten, w) + b\n cache = (x, w, b)\n return out, cache",
"def test_base_and_coeff_batching_support(self):\n x = np.array([-1, -2, -3])\n y = np.array([1, 2, 3])\n op = qml.s_prod(y, qml.RX(x, 0))\n mat = op.matrix()\n true_mat = qml.math.stack([qml.s_prod(j, qml.RX(i, 0)).matrix() for i, j in zip(x, y)])\n assert qml.math.allclose(mat, true_mat)\n assert mat.shape == (3, 2, 2)",
"def apply_matrix(self, A):\n assert self.is_vector(), 'Can only apply matrices to vector-valued functions'\n C = np.matmul(A, self.coeffs[..., None])\n assert C.shape[-1] == 1 # this should have created a new singleton axis\n return BSplineFunc(self.kvs, np.squeeze(C, axis=-1))",
"def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state",
"def _inner_product_a0(self, tangent_vec_a, tangent_vec_b, vertex_areas_bp):\n return self.a0 * gs.sum(\n vertex_areas_bp\n * gs.einsum(\"...bi,...bi->...b\", tangent_vec_a, tangent_vec_b),\n axis=-1,\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
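A quick equivalence check (not part of the dataset row) between the einsum used by `batch_vTAv` and the explicit matmul path quoted in its docstring; batch and matrix sizes are illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(5, 3, 3))  # batch of matrices [B, X, X]
v = rng.normal(size=(5, 3))     # batch of vectors  [B, X]

fast = batch_vTAv(A, v)
slow = np.matmul(v[..., None, :], np.matmul(A, v[..., :, None])).squeeze((-2, -1))
print(np.allclose(fast, slow))  # True
```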
Performs a batched inner product over the last dimension. Replacement for deprecated `from numpy.core.umath_tests import inner1d`. | def batch_inner(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:
if verify and a.shape != b.shape:
raise ValueError("All dimensions have to be equal")
    if a.shape[-1] == 0:
        return np.zeros(a.shape[:-1], dtype=a.dtype)  # reduced shape, consistent with the einsum output
return np.einsum("...i,...i->...", a, b) # faster than np.sum(a * b, axis=-1) | [
"def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]",
"def test_batched_outer_product():\n rng = tl.check_random_state(1234)\n batch_size = 3\n\n X = tl.tensor(rng.random_sample((batch_size, 4, 5, 6)))\n Y = tl.tensor(rng.random_sample((batch_size, 3)))\n Z = tl.tensor(rng.random_sample((batch_size, 2)))\n res = batched_outer([X, Y, Z])\n true_res = tenalg.tensordot(X, Y, (), batched_modes=0)\n true_res = tenalg.tensordot(true_res, Z, (), batched_modes=0)\n\n testing.assert_array_almost_equal(res, true_res)",
"def _outer_product_multiple(cls, tensors):\n result = tensors[0]\n\n for i in range(len(tensors) - 1):\n result = cls._outer_product(result, tensors[i + 1])\n\n return result",
"def outer_product(input_sets, axis=0):\n out = cartesian_product(input_sets)\n return np.prod(out, axis=axis)\n\n # try:\n # from pyapprox.cython.utilities import outer_product_pyx\n # # fused type does not work for np.in32, np.float32, np.int64\n # # so envoke cython cast\n # if np.issubdtype(input_sets[0][0], np.signedinteger):\n # return outer_product_pyx(input_sets, 1)\n # if np.issubdtype(input_sets[0][0], np.floating):\n # return outer_product_pyx(input_sets, 1.)\n # else:\n # return outer_product_pyx(input_sets, input_sets[0][0])\n # except ImportError:\n # print('outer_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), dtype=int)\n # for ii in range(num_sets):\n # sizes[ii] = len(input_sets[ii])\n # num_elems *= sizes[ii]\n\n # # try:\n # # from pyapprox.weave import c_outer_product\n # # return c_outer_product(input_sets)\n # # except:\n # # print ('outer_product extension failed')\n\n # result = np.empty((num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # result[ii] = 1.0\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # result[ii] *= input_sets[jj][multi_index[jj]]\n\n # return result",
"def _outer_product(cls, a, b):\n return np.reshape(np.outer(a, b), a.shape + b.shape)",
"def inner_product_array(\n num_states, num_rows, num_cols, max_vecs_per_node, verbosity=1):\n col_vec_handles = [mr.VecHandlePickle(join(data_dir, col_vec_name%col_num))\n for col_num in mr.range(num_cols)]\n row_vec_handles = [mr.VecHandlePickle(join(data_dir, row_vec_name%row_num))\n for row_num in mr.range(num_rows)]\n\n generate_vecs(data_dir, num_states, row_vec_handles+col_vec_handles)\n\n my_VS = mr.VectorSpaceHandles(\n np.vdot, max_vecs_per_node=max_vecs_per_node, verbosity=verbosity)\n\n prof = cProfile.Profile()\n start_time = time.time()\n prof.runcall(\n my_VS.compute_inner_product_array, *(col_vec_handles, row_vec_handles))\n total_time = time.time() - start_time\n prof.dump_stats('IP_array_r%d.prof'%mr.parallel.get_rank())\n\n return total_time",
"def batched_tensordot(x, y, axes=2):\n return _tensordot_as_dot(x, y, axes, dot=batched_dot, batched=True)",
"def outer_product(a, b, batch=False):\n if batch:\n return batch_outer_product(a, b)\n a, b = normalize_and_check_ndim([a, b], 1)\n # The outer product is equivalent to matrix multiplication a * b\n # where the vector a is interpreted as a column matrix and the\n # vector b as a row matrix. The following reshaping and\n # multiplication accomplishes this.\n return a[:, np.newaxis] * b[np.newaxis, :]",
"def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder",
"def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))",
"def batch_outer_sum(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # Due to broadcasting, this sum works analogously to batch matrix\n # multiplication. See also comments in batch_outer_product().\n return a[:, :, np.newaxis] + b[:, np.newaxis, :]",
"def innerProduct(a,b):\n inner = 0 #initial varaiable return at end\n for i in range(len(a)):# run through each entry in the vector\n inner += a[i] * b[i] # multiply one entry from a to another b\n return inner # return the final result",
"def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)",
"def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))",
"def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)",
"def outer_product(data_a, data_b):\n vec_a = data_a.reshape(-1, 1)\n vec_b = data_b.reshape(1, -1)\n \n return np.dot(vec_a, vec_b)",
"def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z",
"def inefficient_outer(x, y):\n result = np.zeros((len(x), len(y))) \n for i in range(len(x)):\n for j in range(len(y)):\n result[i, j] = x[i]*y[j]\n \n return result",
"def _row_tensor_product(dms):\n if len(dms) == 0:\n raise ValueError(\"Tensor product arrays sequence should not be empty.\")\n for dm in dms:\n if dm.ndim != 2:\n raise ValueError(\"Tensor product arguments should be 2-d arrays.\")\n\n tp_nrows = dms[0].shape[0]\n tp_ncols = 1\n for dm in dms:\n if dm.shape[0] != tp_nrows:\n raise ValueError(\"Tensor product arguments should have \"\n \"same number of rows.\")\n tp_ncols *= dm.shape[1]\n tp = np.zeros((tp_nrows, tp_ncols))\n tp[:, -dms[-1].shape[1]:] = dms[-1]\n filled_tp_ncols = dms[-1].shape[1]\n for dm in dms[-2::-1]:\n p = - filled_tp_ncols * dm.shape[1]\n for j in range(dm.shape[1]):\n xj = dm[:, j]\n for t in range(-filled_tp_ncols, 0):\n tp[:, p] = tp[:, t] * xj\n p += 1\n filled_tp_ncols *= dm.shape[1]\n\n return tp"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
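An illustrative call to `batch_inner`, shown next to the slower `np.sum` equivalent mentioned in its comment (the arrays are made up):

```python
import numpy as np

a = np.arange(12.0).reshape(3, 4)
b = np.ones((3, 4))
print(batch_inner(a, b))       # [ 6. 22. 38.]
print(np.sum(a * b, axis=-1))  # same values, the slower equivalent
```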
`probs`: ndarray of values. `k`: take the smallest `k` elements if `reverse` is False and the largest `k` if `reverse` is True. `axis`: sorting and selection axis. | def batchtopk(
probs: np.ndarray, k: Optional[int] = None, axis: int = -1, reverse: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
if k is not None and k <= 0:
raise ValueError("k must be larger than 0. Use None to chose all elements.")
if axis != -1:
raise ValueError("Only last axis supported atm")
if len(probs.shape) <= 1:
raise ValueError("probs must be at least 2-dimensional")
if reverse:
sign = -1
else:
sign = 1
indices = np.argsort(sign * probs, axis=-1) # use argpartition?
probs = np.take_along_axis(probs, indices[..., :k], axis=-1)
return indices, probs | [
"def tflite_top_k_probs(probs, k):\n\n if k > 0:\n return np.flip(probs[0].argsort()[-k:])\n else:\n return np.flip(probs[0].argsort())",
"def old_topk_sorted_implementation(X, k, axis, largest):\n sorted_indices = numpy.argsort(X, axis=axis)\n sorted_values = numpy.sort(X, axis=axis)\n if largest:\n sorted_indices = numpy.flip(sorted_indices, axis=axis)\n sorted_values = numpy.flip(sorted_values, axis=axis)\n ark = numpy.arange(k)\n topk_sorted_indices = numpy.take(sorted_indices, ark, axis=axis)\n topk_sorted_values = numpy.take(sorted_values, ark, axis=axis)\n return topk_sorted_values, topk_sorted_indices",
"def tf_top_k_probs(probs, k):\n\n if k > 0:\n return probs.argsort()[-k:][::-1]\n else:\n return probs.argsort()[:][::-1]",
"def np_topk(x,k,dim=0):\n topk_index = np.argsort(-x,axis=dim)[:k]",
"def top_k_predictions(pred,k):\n return [np.argsort(pred[i])[::-1][:k].tolist() for i in range(len(pred))]",
"def greatest(a, k: int, axis: int = -1):\n if k == a.shape[axis]:\n return np.flip(np.sort(a, axis=axis), axis=axis)\n a = np.partition(a, -k, axis=axis)\n take_this = np.arange(-k % a.shape[axis], a.shape[axis])\n a = np.take(a, take_this, axis=axis)\n a = np.flip(np.sort(a, axis=axis), axis=axis)\n return a",
"def topk(self, k, dim=-1, largest=True, sorted=True):\n return array_funcs.topk(self, k, dim, largest, sorted)",
"def least(a, k: int, axis: int = -1):\n if k == a.shape[axis]:\n return np.sort(a, axis=axis)\n a = np.partition(a, k - 1, axis=axis)\n take_this = np.arange(k)\n a = np.take(a, take_this, axis=axis)\n a = np.sort(a, axis=axis)\n return a",
"def topK(arr,k):\n c=np.copy(arr)\n value=[]\n idxs=[]\n for i in range(k):\n idx=np.argmax(c)\n idxs.append(idx)\n val=float(\"{0:.5f}\".format(c[idx]))\n value.append(val)\n c[idx]=0\n return idxs,value",
"def select_k_best(points, k):\n sorted_prob = points[points[:, 2].argsort(), :2]\n start = min(k, points.shape[0])\n return sorted_prob[-start:, :]",
"def _sample_top_k(self, probs):\n sorted_probs, sorted_indices = probs.sort(dim=-1, descending=True)\n new_distribution = sorted_probs[:, :self.top_k] / sorted_probs.sum(-1, keepdim=True)\n sample_indices = self._sample(new_distribution)\n sample = sorted_indices.gather(-1, sample_indices)\n return sample",
"def sort_k(data, k):\n if isinstance(data, np.ndarray):\n data = data.tolist()\n return(sorted(data)[:k])",
"def sort(inputs: jnp.ndarray,\n axis: int = -1,\n topk: int = -1,\n num_targets: int = None,\n **kwargs) -> jnp.ndarray:\n return apply_on_axis(_sort, inputs, axis, topk, num_targets, **kwargs)",
"def indices_of_top_k(arr, k):\n return np.sort(np.argpartition(np.array(arr), -k)[-k:])",
"def fetch_top_k(vect, mat, k):\n resultant = np.dot(mat, vect)\n arglist = np.argsort(resultant)\n arglist = arglist[-1:(-1 - k):-1]\n return arglist, resultant",
"def get_top_k_indices(arr, k=1):\n\n return arr.argsort()[-k:][::-1]",
"def top_k_over_axis(inputs, k, axis=-1, **kwargs):\n with tf.name_scope('top_k_along_axis'):\n if axis == -1:\n return tf.nn.top_k(inputs, k, **kwargs)\n\n perm_order = list(range(inputs.shape.ndims))\n perm_order.append(perm_order.pop(axis))\n inv_order = [perm_order.index(i) for i in range(len(perm_order))]\n\n input_perm = tf.transpose(inputs, perm_order)\n input_perm_sorted, sort_indices_perm = tf.nn.top_k(\n input_perm, k=k, **kwargs)\n\n input_sorted = tf.transpose(input_perm_sorted, inv_order)\n sort_indices = tf.transpose(sort_indices_perm, inv_order)\n return input_sorted, sort_indices",
"def naive_topK(matrix, K):\n sorted_data = -np.sort(-matrix)\n sorted_data = sorted_data[0:K]\n sorted_idx = np.argsort(-matrix)\n sorted_idx = sorted_idx[0:K]\n return sorted_data, sorted_idx",
"def k_smallest_sorted(a, k):\r\n k_smallest_idxs = np.argpartition(a, k)[:k]\r\n return k_smallest_idxs[np.argsort(a[k_smallest_idxs])]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
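An illustrative call to `batchtopk` with made-up probabilities; note that the function returns the full argsort indices but only the first `k` gathered values:

```python
import numpy as np

probs = np.array([[0.1, 0.5, 0.2, 0.2],
                  [0.7, 0.1, 0.1, 0.1]])
idx, top = batchtopk(probs, k=2, reverse=True)  # largest two per row
print(top)           # [[0.5 0.2]
                     #  [0.7 0.1]]
print(idx[..., :2])  # the matching column indices
```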
Calculates the sum of the logs of the diagonal elements (batchwise if necessary). | def logtrace(m: np.ndarray) -> np.ndarray:
""" note: performance cannot easily be improve by numba.
`np.diagonal` not supported by numba 0.52.0
"""
return np.sum(np.log(np.diagonal(m, axis1=-2, axis2=-1)), axis=-1) | [
"def trace(X):\r\n return extract_diag(X).sum()",
"def batch_trace(x, dim1=-2, dim2=-1):\n return torch.diagonal(x, dim1=dim1, dim2=dim2).sum(-1)",
"def trace(X):\n return extract_diag(X).sum()",
"def ln_sum_i_neq_j(x):\n iw_size = x.size(0)\n batch_size = x.size(1)\n\n # TODO: Would torch.expand instead of torch.repeat make this faster?\n inv_mask = torch.eye(iw_size).unsqueeze(dim=2).repeat(1, 1, batch_size)\n x_masked = x.view(1, iw_size, batch_size) - inv_mask*1000000.0\n return logsumexp(x_masked, dim=1)",
"def ln_sum_i_neq_j(x):\n\tiw_size = x.size(0)\n\tbatch_size = x.size(1)\n\n\t# TODO: Would torch.expand instead of torch.repeat make this faster?\n\tinv_mask = torch.eye(iw_size).unsqueeze(dim=2).repeat(1, 1, batch_size)\n\tx_masked = x.view(1, iw_size, batch_size) - inv_mask*1000000.0\n\treturn logsumexp(x_masked, dim=1)",
"def logSum(ls):\n\timport math\n\treturn math.log10(sum(ls))",
"def logsum(x):\n # Use the max to normalize, as with the log this is what accumulates\n # the less errors\n vmax = x.max(axis=0)\n out = np.log(np.sum(np.exp(x - vmax), axis=0))\n out += vmax\n return out",
"def trace(self):\n diagonal_sum = 0\n if not self.is_square():\n raise(ValueError, \"Cannot calculate the trace of a non-square matrix.\")\n \n # TODO - your code here\n # Nested for loop to calculate the sum of diagonal matrix\n for i in range(len(self.g)):\n for j in range(len(self.g[0])):\n if (i == j):\n diagonal_sum += self.g[i][j]\n return diagonal_sum",
"def sum_entropy(matrix):\n se = 0\n matrix_bis = np.zeros([2*matrix.shape[0]])\n for i in range(0, matrix.shape[0]):\n for j in range(0, matrix.shape[0]):\n matrix_bis[i+j] += matrix[i, j]\n for v in matrix_bis:\n if v > 0:\n se -= v*math.log(v)\n return se",
"def _trace_sparse(op):\n return np.sum(op.diagonal())",
"def _compute_log_value(self, *input_tensors):",
"def sum_diag(max_lines):\r\n dsum = 1 # sum of diagonals\r\n cpt = 1 # number of lines processed\r\n val = 1 # value of the current place in the square\r\n inc = 0 # the increment between number for one line\r\n \r\n while cpt < max_lines:\r\n cpt += 2\r\n inc += 2\r\n \r\n for corner in range(4):\r\n val += inc\r\n dsum += val\r\n\r\n return dsum",
"def logsum(x):\n mmax = np.max(x)\n return mmax + np.log(np.sum(np.exp(x-mmax)))",
"def logsumexp(logv):\n res = logzero()\n for val in logv:\n res = logsum_pair(res, val)\n return res",
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.log(), diag_shape=self.diag_shape)",
"def log_cumsum(probs, dim=1):\n return torch.log(torch.cumsum(probs, dim=dim) + EPS)",
"def _compute_log_value(self):",
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())",
"def log_sum_exp(x):\r\n # TF ordering\r\n axis = len(x.size()) - 1\r\n m, _ = torch.max(x, dim=axis)\r\n m2, _ = torch.max(x, dim=axis, keepdim=True)\r\n return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
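A quick numerical check of `logtrace` against a naive per-matrix loop; the batch of matrices is illustrative and has positive diagonals so the log is defined:

```python
import numpy as np

rng = np.random.default_rng(1)
m = rng.uniform(0.5, 2.0, size=(4, 3, 3))  # positive entries, so log(diag) is finite
expected = np.array([np.log(np.diag(mi)).sum() for mi in m])
print(np.allclose(logtrace(m), expected))  # True
```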
Shifts `pvals` by the largest value in the last dimension before the exp is calculated to prevent overflow (batchwise if necessary). Can be used if probabilities are normalized again later. | def shiftedexp(pvals: np.ndarray) -> np.ndarray:
if pvals.shape[-1] == 0:
return np.empty_like(pvals)
return np.exp(pvals - np.amax(pvals, axis=-1)[..., None]) | [
"def benjamini_hochberg_step_down(pvals):\r\n tmp = fdr_correction(pvals)\r\n corrected_vals = empty(len(pvals))\r\n max_pval = 1.\r\n for i in argsort(pvals)[::-1]:\r\n if tmp[i] < max_pval:\r\n corrected_vals[i] = tmp[i]\r\n max_pval = tmp[i]\r\n else:\r\n corrected_vals[i] = max_pval\r\n return corrected_vals",
"def _correct_p_values(self, p_vals):\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\n corrected_p_vals = []\n for p_val in p_vals:\n if p_val is not None:\n corrected_p_vals.append(min(p_val * num_tests, 1))\n else:\n corrected_p_vals.append(p_val)\n return corrected_p_vals",
"def _correct_p_values(self, p_vals):\r\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\r\n corrected_p_vals = []\r\n for p_val in p_vals:\r\n if p_val is not None:\r\n corrected_p_vals.append(min(p_val * num_tests, 1))\r\n else:\r\n corrected_p_vals.append(p_val)\r\n return corrected_p_vals",
"def posterior_predictive_pvals(self):\n pvals = {}\n for gene in self.ppc:\n z_true = self.sample[gene]\n z = st.laplace.rvs(*st.laplace.fit(self.ppc[gene]), size=100_000)\n # Rule of thumb: for 100,000 samples, report p-values to the thousands place\n # Add pseudocount for instances where outlier is more extreme than every other sample\n pvals[gene] = round((np.sum(z_true < z) + 1) / (len(z) + 1), 3)\n self.ppp = pd.DataFrame(pvals.items(), columns=[\"Gene\", \"Pval\"]).sort_values(\n \"Pval\"\n )\n self.ppp = self.ppp.set_index(\"Gene\", drop=True)",
"def test_correct_p_values(self):\r\n exp = [0.003, 0.006, 0.003]\r\n obs = self.mc._correct_p_values([0.001, 0.002, 0.001])\r\n assert_almost_equal(obs, exp)",
"def pmax(*vals, **kw):\n if kw.has_key['processes']:\n prcesses = kw['processes']\n return ePool(processes=processes).max(*vals, **kw)",
"def correctPValues(pvalues, method = 'BH'):\n \n pvals = np.asarray(pvalues);\n\n if method.lower() in ['bh', 'fdr']:\n \n pvals_sorted_ids = np.argsort(pvals);\n pvals_sorted = pvals[pvals_sorted_ids]\n sorted_ids_inv = pvals_sorted_ids.argsort()\n\n n = len(pvals);\n bhfactor = np.arange(1,n+1)/float(n);\n\n pvals_corrected_raw = pvals_sorted / bhfactor;\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n pvals_corrected[pvals_corrected>1] = 1;\n \n return pvals_corrected[sorted_ids_inv];\n \n elif method.lower() in ['b', 'fwer']:\n \n n = len(pvals); \n \n pvals_corrected = n * pvals;\n pvals_corrected[pvals_corrected>1] = 1;\\\n \n return pvals_corrected;\n \n #return reject[pvals_sortind.argsort()]",
"def max(vals):\n\n return max(vals)",
"def test_correct_p_values_large_correction(self):\r\n exp = [1, None, 0.03, 0.03]\r\n obs = self.mc._correct_p_values([0.5, None, 0.01, 0.01])\r\n self.compare_multiple_level_array(obs, exp)",
"def adjustPValues(p_values, method=\"fdr\"):\n\tadjusted_p_values = p_values[:]\n\tn = len(p_values)\n\tif method.lower() == \"bh\" or method.lower() == 'fdr':\n\t\tni = range(n,0,-1) # from n to 1\n\t\t# Sort the P values and keep track of the indices\n\t\tindexed_pv = sorted(zip(p_values, range(n)), reverse=True)\n\t\t(pvals,inds) = zip(*indexed_pv)\n\t\t# adjust\n\t\tnewp = [(float(n)/ni[xi])*pvals[xi] for xi in range(n)]\n\t\tcum_min_p = [min(newp[0:xi]) for xi in range(1,n+1)]\n\t\tadjp_sorted = [min(p,1.0) for p in cum_min_p]\n\t\t# re-sort\n\t\tadjusted_p_values = [-1]*n\n\t\tfor xi in range(n):\n\t\t\tadjusted_p_values[inds[xi]] = adjp_sorted[xi]\n\telif method.lower() == 'bonferroni':\n\t\tadjusted_p_values = [min(n*p,1.0) for p in p_values]\n\treturn adjusted_p_values",
"def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:\n\n return Sampler(np.cumsum(pvals))",
"def setp(self, pvals):\n self.parameters[:] = pvals",
"def vals_m(self, y_vals):\n y_vals = np.array(y_vals)\n y_vals = (np.ones_like(y_vals) * self.closest_y - y_vals) * self.resolution\n return self.vals(y_vals) / self.resolution",
"def log_normalize(self, log_vals, axis = 1):\n\n log_vals -= np.expand_dims(np.max(log_vals, axis = axis), axis = axis)\n log_vals = np.exp(log_vals)\n return log_vals/np.expand_dims(np.sum(log_vals, axis = axis), axis = axis)",
"def _compute_probabilities(criterion_values):\n\n delta = np.array(criterion_values) - min(criterion_values)\n prob = np.exp(-delta / 2)\n return prob / np.sum(prob)",
"def fisher_p(p_values):\n\treturn -2*np.sum(np.log(p_values))",
"def log_normalize(log_vals, axis = 1):\n\n log_vals -= np.expand_dims(np.max(log_vals, axis = axis), axis = axis)\n log_vals = np.exp(log_vals)\n return log_vals/np.expand_dims(np.sum(log_vals, axis = axis), axis = axis)",
"def p(vals, errs, truevals, nchunks=500, ntruechunks=1000):\n out = np.array([])\n\n chunks = itertools.izip([vals[i:i+nchunks] for i in xrange(0, len(vals), nchunks)], \n [errs[i:i+nchunks] for i in xrange(0, len(vals), nchunks)])\n \n for chunk, errchunk in chunks:\n trueout = np.zeros(len(chunk))\n \n covIs = 1./errchunk**2.\n A = 1./np.sqrt( (2.*np.pi)**len(vals[0]) * np.prod(errchunk**2., axis=1 ) )\n\n truechunks = (truevals[i:i+ntruechunks] for i in xrange(0, len(truevals), ntruechunks))\n for truechunk in truechunks:\n diff = chunk[:,np.newaxis,:]-truechunk[np.newaxis,:,:]\n\n B = -0.5 * np.sum(diff**2.*covIs[:,np.newaxis], axis=2)\n C = A[:,np.newaxis] * np.exp(B)\n \n trueout += np.sum(C, axis=1)\n\n out = np.concatenate((out, trueout))\n \n return out",
"def plot_pvalues(pvals, axeslist=None, **styleArgs):\n if axeslist is None:\n pvalfig = pylab.figure(figsize=(6,6))\n pvalfig.set_label('pvalues')\n pvalaxes = pylab.subplot(211)\n pvalhistaxes = pylab.subplot(212)\n axeslist = (pvalaxes, pvalhistaxes)\n else:\n (pvalaxes, pvalhistaxes) = axeslist\n style = {'histtype':'step', 'color':'k'}\n style.update(styleArgs)\n\n ### Plot the histogram. ###\n if len(pvals) > 50:\n nbins = len(pvals)/5\n ndf, bins, patches = pvalhistaxes.hist(pvals, bins=nbins, **style)\n else:\n ndf, bins, patches = pvalhistaxes.hist(pvals, **style)\n\n ### Plot the CDF. ###\n cdfx = numpy.sort(pvals)\n cdfy = numpy.arange(len(pvals))\n cdfydiff = cdfy - len(pvals) * cdfx\n pvalaxes.plot(cdfx, cdfydiff, ls='steps', color=style['color'])\n\n ### Plot horizontal line at expected bin value. ###\n expbinn = float(len(pvals))/len(ndf)\n sigmaexpbinn = numpy.sqrt(expbinn)\n pvalhistaxes.axhline(expbinn, ls=':', color=style['color'])\n pvalhistaxes.axhline(expbinn + sigmaexpbinn, ls=':', color=style['color'])\n pvalhistaxes.axhline(expbinn - sigmaexpbinn, ls=':', color=style['color'])\n\n ### Plot horizontal line zero. ###\n pvalaxes.axhline(0.0, ls=':', color=style['color'])\n\n pvalhistaxes.set_xlabel('p value')\n pvalhistaxes.set_ylabel('N')\n pvalaxes.set_xlabel('p value')\n pvalaxes.set_ylabel('CDF excess')\n return axeslist"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
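A sketch of the intended use of `shiftedexp`: subtracting the per-row maximum keeps large log-values from overflowing, and the shift cancels once the result is renormalized (values illustrative):

```python
import numpy as np

logp = np.array([[1000.0, 999.0, 998.0]])  # plain np.exp would overflow to inf here
e = shiftedexp(logp)
probs = e / e.sum(axis=-1, keepdims=True)  # softmax-style renormalization
print(probs)  # [[0.66524096 0.24472847 0.09003057]]
```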
Sample from list of probabilities `pvals` with replacement. The probabilities don't need to be normalized. | def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:
return Sampler(np.cumsum(pvals)) | [
"def _correct_p_values(self, p_vals):\r\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\r\n corrected_p_vals = []\r\n for p_val in p_vals:\r\n if p_val is not None:\r\n corrected_p_vals.append(min(p_val * num_tests, 1))\r\n else:\r\n corrected_p_vals.append(p_val)\r\n return corrected_p_vals",
"def _correct_p_values(self, p_vals):\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\n corrected_p_vals = []\n for p_val in p_vals:\n if p_val is not None:\n corrected_p_vals.append(min(p_val * num_tests, 1))\n else:\n corrected_p_vals.append(p_val)\n return corrected_p_vals",
"def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())",
"def random_sample_from_prod_discrete_domain(list_of_list_of_vals, num_samples):\n return random_sample_from_discrete_domain(list_of_list_of_vals, num_samples)",
"def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r",
"def setp(self, pvals):\n self.parameters[:] = pvals",
"def posterior_predictive_pvals(self):\n pvals = {}\n for gene in self.ppc:\n z_true = self.sample[gene]\n z = st.laplace.rvs(*st.laplace.fit(self.ppc[gene]), size=100_000)\n # Rule of thumb: for 100,000 samples, report p-values to the thousands place\n # Add pseudocount for instances where outlier is more extreme than every other sample\n pvals[gene] = round((np.sum(z_true < z) + 1) / (len(z) + 1), 3)\n self.ppp = pd.DataFrame(pvals.items(), columns=[\"Gene\", \"Pval\"]).sort_values(\n \"Pval\"\n )\n self.ppp = self.ppp.set_index(\"Gene\", drop=True)",
"def set_probabilities(self, p=[]):\n self.probabilities = p[:]",
"def sample_distribution(numbers, probabilities, num_samples):\n intervals = []\n intervals.append(probabilities[0])\n new_interval = probabilities[0]\n\n for i in range(1, len(probabilities)):\n new_interval += probabilities[i]\n intervals.append(new_interval)\n\n counter = 0\n new_numbers = []\n while counter <= num_samples:\n for i in range(len(intervals)):\n # Generate a random num between 0 - 1\n # i.e. flip a coin.\n rand_prob = np.random.random_sample((1,))\n if rand_prob <= [intervals[i]]:\n new_numbers.append(numbers[i])\n counter += 1\n\n return new_numbers",
"def samplingWithReplacement(m):\n return [ random.randrange(m) for i in range(m) ]",
"def correctPValues(pvalues, method = 'BH'):\n \n pvals = np.asarray(pvalues);\n\n if method.lower() in ['bh', 'fdr']:\n \n pvals_sorted_ids = np.argsort(pvals);\n pvals_sorted = pvals[pvals_sorted_ids]\n sorted_ids_inv = pvals_sorted_ids.argsort()\n\n n = len(pvals);\n bhfactor = np.arange(1,n+1)/float(n);\n\n pvals_corrected_raw = pvals_sorted / bhfactor;\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n pvals_corrected[pvals_corrected>1] = 1;\n \n return pvals_corrected[sorted_ids_inv];\n \n elif method.lower() in ['b', 'fwer']:\n \n n = len(pvals); \n \n pvals_corrected = n * pvals;\n pvals_corrected[pvals_corrected>1] = 1;\\\n \n return pvals_corrected;\n \n #return reject[pvals_sortind.argsort()]",
"def rand_replace_vals(d): \n d = {k: np.random.rand() for k in d}\n \n return d",
"def setp(self, pvals):\n if pvals is None:\n return\n\n # set here rather than delegating to a Parameters\n # object, because it may not necessarily be a\n # Parameters object\n _varying_parameters = self.varying_parameters()\n if len(pvals) == len(_varying_parameters):\n for idx, param in enumerate(_varying_parameters):\n param.value = pvals[idx]\n return\n\n # values supplied are enough to specify all parameter values\n # even those that are repeated\n flattened_parameters = list(flatten(self.parameters))\n if len(pvals) == len(flattened_parameters):\n for idx, param in enumerate(flattened_parameters):\n param.value = pvals[idx]\n return\n\n raise ValueError(\n f\"Incorrect number of values supplied ({len(pvals)})\"\n f\", supply either the full number of parameters\"\n f\" ({len(flattened_parameters)}, or only the varying\"\n f\" parameters ({len(_varying_parameters)}).\"\n )",
"def __setitem__(self, values, p):\n if isinstance(values, dict):\n values = [values[var] for var in self.variables]\n self.prob[values] = p\n for var,val in zip(self.variables, values):\n if val not in self.vals[var]:\n self.vals[var].append(val)",
"def random_sample_from_discrete_domain(dscr_vals, num_points=None):\n def _sample_single_point():\n \"\"\" Samples a single point. \"\"\"\n return [np.random.choice(categ) for categ in dscr_vals]\n # Now draw num_points of them\n num_points_to_sample = 1 if num_points is None else num_points\n if len(dscr_vals) == 0:\n ret = [[]] * num_points_to_sample\n else:\n ret = [_sample_single_point() for _ in range(num_points_to_sample)]\n if num_points is None:\n return ret[0]\n else:\n return ret",
"def weighted_sample(choices: List[Any], probs: List[float]):\n probs = np.concatenate(([0], np.cumsum(probs)))\n r = random.random()\n for j in range(len(choices) + 1):\n if probs[j] < r <= probs[j + 1]:\n return choices[j]",
"def test_sampling_probability_correction(self, random_seed):\n\n # (num_queries, num_candidates)\n shape = (10, 20)\n rng = np.random.RandomState(random_seed)\n\n logits = rng.uniform(size=shape).astype(np.float32)\n probs = rng.uniform(size=shape[1]).astype(np.float32)\n\n corrected_logits = loss.SamplingProbablityCorrection()(logits, probs)\n corrected_logits = corrected_logits.numpy()\n\n np.testing.assert_array_less(logits, corrected_logits)\n\n # set some of the probabilities to 0\n probs_with_zeros = probs * rng.choice([0., 1.], size=probs.shape)\n\n corrected_logits_with_zeros = loss.SamplingProbablityCorrection()(\n logits, probs_with_zeros)\n corrected_logits_with_zeros = corrected_logits_with_zeros.numpy()\n\n np.testing.assert_array_less(logits, corrected_logits_with_zeros)",
"def sample_from_list(l, probs, max_n=None):\n assert len(l) == len(probs), 'given list l and probs must have same length'\n if max_n is None:\n max_n = len(l)\n sum_probs = sum(probs)\n if sum_probs == 0:\n return []\n probs_ = np.array(probs) / sum_probs\n # we draw max n or |probs_ > 0|\n # noinspection PyTypeChecker\n n = min(max_n, np.sum(probs_ > 0))\n # use idx approach as direct passing to np.random.choice would convert\n # items of l into str\n # noinspection PyUnresolvedReferences\n res = [\n l[idx] for idx in np.random.choice(len(l), n, replace=False, p=probs_)\n ]\n return res",
"def correct_pvalues_for_multiple_testing(pvalues, correction_type=\"Benjamini-Hochberg\"):\n from numpy import array, empty\n pvalues = array(pvalues)\n n = len(pvalues)\n new_pvalues = empty(n)\n if correction_type == \"Bonferroni\":\n new_pvalues = n * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n new_pvalues[i] = (n-rank) * pvalue\n elif correction_type == \"Benjamini-Hochberg\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n/rank) * pvalue)\n for i in xrange(0, int(n)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
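The `Sampler` class referenced by `sample_probabilities` is not shown in this row; a plausible minimal version, inferred only from the fact that it is built from `np.cumsum(pvals)` and then called with no arguments, could look like the sketch below (an assumption for illustration, not the dataset's actual class):

```python
import numpy as np

class Sampler:
    """Draws an index with probability proportional to unnormalized weights (assumed behavior)."""

    def __init__(self, cumulative: np.ndarray):
        self._cum = cumulative        # cumulative sums of the weights
        self._total = cumulative[-1]  # overall normalizer

    def __call__(self) -> int:
        u = np.random.uniform(0.0, self._total)
        return int(np.searchsorted(self._cum, u, side="right"))

draw = sample_probabilities(np.array([2.0, 1.0, 1.0]))  # weights need not sum to 1
print(draw())  # 0 about half the time, 1 and 2 about a quarter each
```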
Sample from the categorical distribution using `pvals`. | def categorical(pvals: np.ndarray) -> int:
return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals))) | [
"def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:\n\n return Sampler(np.cumsum(pvals))",
"def sample(self, params):\n probabilities = utils.normalize(self.counts[tuple(params)])\n sampler = self.rng.categorical_sampler(self.support(params), probabilities)\n return sampler()",
"def sample_categorical(p):\n q = exp(p - logsumexp(p))\n r = random()\n k = 0\n while k < len(q) - 1 and q[k] <= r:\n r -= q[k]\n k += 1\n return k",
"def categorical_sampling(distribution):\n no_drawings = 1\n sample = np.argmax(np.random.multinomial(no_drawings, distribution))\n\n return sample",
"def random_sample_from_discrete_domain(dscr_vals, num_points=None):\n def _sample_single_point():\n \"\"\" Samples a single point. \"\"\"\n return [np.random.choice(categ) for categ in dscr_vals]\n # Now draw num_points of them\n num_points_to_sample = 1 if num_points is None else num_points\n if len(dscr_vals) == 0:\n ret = [[]] * num_points_to_sample\n else:\n ret = [_sample_single_point() for _ in range(num_points_to_sample)]\n if num_points is None:\n return ret[0]\n else:\n return ret",
"def sample_categorical(distribution):\n sample = random.random()\n for event, prob in distribution.items():\n if sample < prob:\n return event\n sample -= prob\n raise ValueError('sum of distribution less than one')",
"def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r",
"def random_sample_from_prod_discrete_domain(list_of_list_of_vals, num_samples):\n return random_sample_from_discrete_domain(list_of_list_of_vals, num_samples)",
"def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())",
"def classify_pval(self, sample):\n\t\tpass",
"def select_five_categories(prob_dist_dict):\n # For clarity, save keys as labels and values as probabilities.\n labels = list( prob_dist_dict.keys() )\n probs = list( prob_dist_dict.values() )\n\n # Use numpy's .choice() to return a label based on the given weight.\n return list( np.random.choice(labels, 5, p=probs) )",
"def sample_categorical(probabilities):\n\n cumsum = np.cumsum(probabilities, axis=1)\n rand = np.random.random_sample(size=probabilities.shape[0])[:, None]\n activations = np.zeros(probabilities.shape)\n activations[range(probabilities.shape[0]), np.argmax((cumsum >= rand), axis=1)] = 1\n return activations",
"def _sample(self, probs):\n # position zero is an invalid item to sample\n probs[:, 0] = 0.0\n return torch.multinomial(probs, 1)",
"def sample(self):\n \"*** YOUR CODE HERE ***\"\n items = self.items() # Extract the items, a tuple of values and distributions\n values = [i[0] for i in items] # Values of the distribution\n dist = [i[1] for i in items] # Distribution\n self.normalize() # In case the total does not sum to 1\n random_sample = random.random() # A random sample, random number between 0 and 1\n iteration, iteration_dist = 0, dist[0] # Initialization of i, the total will be calculated iteratively\n while random_sample > iteration_dist: # If random sample exceeds total, then the corresponding value will be the weight\n iteration += 1 # Iterate i\n iteration_dist += dist[iteration] # Add the i'th element of distribution to the total\n return values[iteration]",
"def sampleFromCategorical(theta):\n theta = theta/np.sum(theta)\n return np.random.multinomial(1, theta).argmax()",
"def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)",
"def random_bins(num_classes, dist):\n N = dist.shape[0]\n bins = np.empty([N,1], dtype=np.int32)\n \n for i in range(N):\n smpl = np.random.choice(num_classes, p=dist[i,:]/np.sum(dist[i,:]))\n bins[i,0] = smpl\n \n return bins",
"def sample(self):\n return gc.rand_state.choice(self.domain)",
"def sample_population(trips_df, sample_perc, attributes_df=None, weight_col='freq'):\n if attributes_df is not None:\n sample_pids = trips_df.groupby('pid')[['freq']].sum().join(\n attributes_df, how='left'\n ).sample(\n frac=sample_perc, weights=weight_col\n ).index\n else:\n sample_pids = trips_df.groupby('pid')[['freq']].sum().sample(\n frac=sample_perc, weights=weight_col\n ).index\n\n return trips_df[trips_df.pid.isin(sample_pids)]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
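An illustrative frequency check for `categorical` (assumes `sample_probabilities` and its `Sampler` are available as above; exact counts vary with the random state):

```python
import numpy as np

pvals = np.array([0.2, 0.3, 0.5])
draws = np.array([categorical(pvals) for _ in range(10_000)])
print(np.bincount(draws, minlength=3) / len(draws))  # roughly [0.2 0.3 0.5]
```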
Convert a population (list of observations) to a CDF. | def population2cdf(population: np.ndarray) -> np.ndarray:
population = np.sort(population)
return np.searchsorted(population, population, side="right") / len(population) | [
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def convert_to_cudf(cp_arrays):\n cupy_vertices, cupy_hubs, cupy_authorities = cp_arrays\n df = cudf.DataFrame()\n df[\"vertex\"] = cupy_vertices\n df[\"hubs\"] = cupy_hubs\n df[\"authorities\"] = cupy_authorities\n return df",
"def convert_to_cudf(cp_arrays):\n first, second = cp_arrays\n df = cudf.DataFrame()\n df[\"first\"] = first\n df[\"second\"] = second\n return df",
"def MakeCdf(live):\n cdf = thinkstats2.Cdf(live.prglngth, label='prglngth')\n thinkplot.Cdf(cdf)\n thinkplot.Save('cumulative_prglngth_cdf',\n title='Pregnancy length',\n xlabel='weeks',\n ylabel='CDF')",
"def generate_cdfs(self):\n\n self._CDFs = []\n\n # Need to store max/min samples in each dimension to prevent out of\n # bounds values in the interpolators later.\n self.mins = []\n self.maxs = []\n\n # Get all samples of the i^th dimension at a time to generate CDF\n # NOTE - does iterating over / sorting happen in place? Need deep copy?\n for samples_i in self._samples.T:\n\n # Generate/store interpolant for empirical CDF:\n sorted_i = np.sort(samples_i)\n cdf_values = np.arange(1, len(sorted_i) + 1)/float(len(sorted_i))\n cdf_func = interpolate.interp1d(sorted_i, cdf_values)\n self._CDFs.append(cdf_func)\n\n self.mins.append(sorted_i[0])\n self.maxs.append(sorted_i[-1])",
"def convert_to_cudf(cp_arrays: cp.ndarray) -> cudf.Series:\n vertices = cudf.Series(cp_arrays)\n\n return vertices",
"def cdf(weights):\r\n\treturn np.cumsum(weights) / sum(weights)",
"def cdf(self, value):\n return self._normal.cdf(value)",
"def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue",
"def icdf(self, value):\n return self._normal.icdf(value)",
"def get_plot_cdfs(self):\n\n x_grid = np.zeros((self._num_samples, self._dim))\n cdf_values = np.zeros((self._num_samples, self._dim))\n\n for i, samples_i in enumerate(self._samples.T):\n\n # Generate empirical CDF:\n sorted_i = np.sort(samples_i)\n cdf_vals_i = np.arange(1, len(sorted_i) + 1)/float(len(sorted_i))\n x_grid[:, i] = sorted_i\n cdf_values[:, i] = cdf_vals_i\n\n return x_grid, cdf_values",
"def makePDFCDF(self, item):\n row = self.candDistsTable.currentRow()\n dist_obj = self.cDists.get_obj(row)\n dist_name = dist_obj.get_label()\n PlotWindow(self, dist_obj, dist_name, plot_type=\"pdfcdf\")",
"def cdf_to_pdf(cdf):\n pdf = deepcopy(cdf)\n pdf[1:] -= pdf[:-1].copy()\n return pdf",
"def _convertDistrPointsToCdf(self,pts):\n try:\n return self.cdf(pts.real)\n except TypeError:\n return list(self.cdf(x) for x in pts)",
"def form_PNLF_CDF(data, PNLF, dM, obs_comp, M_5007, m_5007): \n sorted_data = np.sort(data)\n PNLF_comp_corr = np.array(np.interp(m_5007, M_5007+dM, PNLF)*obs_comp)\n PNLF_comp_corr[PNLF_comp_corr < 0] = 0.0\n PNLF_CDF = np.array(np.interp(sorted_data, m_5007, np.nancumsum(PNLF_comp_corr)/np.nansum(PNLF_comp_corr)))\n\n return PNLF_CDF",
"def get_cdf(pdf):\n pdf_norm = normalize(pdf) # Calculate the normalized pdf\n lower_bound = np.min(pdf.x)\n upper_bound = np.max(pdf.x)\n\n def cdf_number(x):\n \"\"\"Numerical cdf.\n\n :param x: The value to evaluate the cdf at.\n :return: The value of the cdf at x.\n \"\"\"\n if x <= lower_bound:\n return 0\n elif x >= upper_bound:\n return 1\n else:\n d = np.abs(x - lower_bound)\n if d > 1e-4: # Check that spacing isn't too small\n samples = np.linspace(lower_bound, x, 2 ** 7 + 1)\n dx = np.abs(samples[1] - samples[0])\n y = np.array([pdf_norm(s) for s in samples])\n return romb(y, dx)\n else:\n return 0\n\n def cdf_vector(x):\n \"\"\"Vectorized cdf.\n\n :param x: The values to evaluate the cdf at.\n :return: The values of the cdf at x.\n \"\"\"\n try:\n return np.array([cdf_number(xi) for xi in x])\n except AttributeError:\n return cdf_number(x)\n\n return cdf_vector",
"def cdf(self, value):\n cdf = torch.where(\n value < 1., \n self.base.cdf(value), \n torch.ones_like(value) # all of the mass\n )\n cdf = torch.where(value < 0., torch.zeros_like(cdf), cdf)\n return cdf",
"def ca_to_coils_second_df(agent_df):",
"def csv_to_cdf(metadata):\n\n basefile = metadata[\"basefile\"]\n\n try:\n ds = read_exo(basefile + \".csv\", skiprows=metadata[\"skiprows\"])\n except UnicodeDecodeError:\n # try reading as Mac OS Western for old versions of Mac Excel\n ds = read_exo(\n basefile + \".csv\", skiprows=metadata[\"skiprows\"], encoding=\"mac-roman\"\n )\n\n metadata.pop(\"skiprows\")\n\n # write out metadata first, then deal exclusively with xarray attrs\n ds = utils.write_metadata(ds, metadata)\n\n del metadata\n\n ds = utils.ensure_cf(ds)\n\n ds = utils.shift_time(ds, 0)\n\n # configure file\n cdf_filename = ds.attrs[\"filename\"] + \"-raw.cdf\"\n\n ds.to_netcdf(cdf_filename, unlimited_dims=[\"time\"])\n\n print(\"Finished writing data to %s\" % cdf_filename)\n\n return ds"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
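A tiny worked example of `population2cdf`; the sample values are made up. Note the result is the empirical CDF evaluated at the sorted observations:

```python
import numpy as np

population = np.array([3.0, 1.0, 2.0, 2.0])
print(population2cdf(population))  # [0.25 0.75 0.75 1.  ]
```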
Convert a discrete PDF into a discrete CDF. | def pmf2cdf(pdf: np.ndarray) -> np.ndarray:
cdf = np.cumsum(pdf)
return cdf / cdf[-1] | [
"def cdf_to_pdf(cdf):\n pdf = deepcopy(cdf)\n pdf[1:] -= pdf[:-1].copy()\n return pdf",
"def get_cdf(pdf):\n pdf_norm = normalize(pdf) # Calculate the normalized pdf\n lower_bound = np.min(pdf.x)\n upper_bound = np.max(pdf.x)\n\n def cdf_number(x):\n \"\"\"Numerical cdf.\n\n :param x: The value to evaluate the cdf at.\n :return: The value of the cdf at x.\n \"\"\"\n if x <= lower_bound:\n return 0\n elif x >= upper_bound:\n return 1\n else:\n d = np.abs(x - lower_bound)\n if d > 1e-4: # Check that spacing isn't too small\n samples = np.linspace(lower_bound, x, 2 ** 7 + 1)\n dx = np.abs(samples[1] - samples[0])\n y = np.array([pdf_norm(s) for s in samples])\n return romb(y, dx)\n else:\n return 0\n\n def cdf_vector(x):\n \"\"\"Vectorized cdf.\n\n :param x: The values to evaluate the cdf at.\n :return: The values of the cdf at x.\n \"\"\"\n try:\n return np.array([cdf_number(xi) for xi in x])\n except AttributeError:\n return cdf_number(x)\n\n return cdf_vector",
"def makePDFCDF(self, item):\n row = self.candDistsTable.currentRow()\n dist_obj = self.cDists.get_obj(row)\n dist_name = dist_obj.get_label()\n PlotWindow(self, dist_obj, dist_name, plot_type=\"pdfcdf\")",
"def __conv(pdf, page, outfile):\n command = ['convert', '-monochrome', '-density ' + str(__RESOLUTION), \n pdf + '[' + str(page) + ']', outfile]\n return ' '.join(command)",
"def pdf_from_cdf(data, idx, what):\n\n cdf = data[what + '_sum'].cumsum() / data[what + '_sum'].sum()\n cdfi = scipy.interpolate.interp1d(cdf.index, cdf, 'linear', bounds_error=False)(idx)\n pdfi = np.hstack((cdfi[0], np.diff(cdfi) / np.diff(idx)))\n return pdfi",
"def _convertDistrPointsToCdf(self,pts):\n try:\n return self.cdf(pts.real)\n except TypeError:\n return list(self.cdf(x) for x in pts)",
"def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue",
"def CDFconvertToDistr(self,pts):\n return self._convertCdfPointsToDistr(self._convertStdPointsToCdf(pts))",
"def convert_pdf(pdf_path):\n with Image(filename=pdf_path, resolution=300, format=\"pdf\") as pdf:\n pdf.convert('tiff')\n pdf.save(filename='./data/raw/full.tiff')",
"def get_ccdf(degseq):\n uniques, counts = np.unique(degseq, return_counts=True)\n cumprob = np.cumsum(counts).astype(np.double) / (degseq.size)\n return uniques[::-1], (1. - cumprob)[::-1]",
"def pdf(self,x):\n if self.transformation:\n pdfValue = self.pdfInTransformedSpace(x)\n else:\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def _pdf(self, d):\n alpha = -2\n return math.exp(alpha * d)",
"def icdf(self, value):\n return self._normal.icdf(value)",
"def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue",
"def cdf(x, mu, sigma, alpha):\n if type(x) is np.ndarray:\n out = np.zeros(len(x))\n for i in range(len(x)):\n out[i] = quad(lambda x: skew_normal.pdf(x,mu,sigma,alpha), -np.inf, x[i])[0]\n return out\n\n else:\n return quad(lambda x: skew_normal.pdf(x,mu,sigma,alpha), -np.inf, x)[0]",
"def pdf(self,x):\n returnPdf = self._distribution.pdf(x)\n return returnPdf",
"def cdf(self, value):\n cdf = torch.where(\n value < 1., \n self.base.cdf(value), \n torch.ones_like(value) # all of the mass\n )\n cdf = torch.where(value < 0., torch.zeros_like(cdf), cdf)\n return cdf",
"def _convert(self):\n logger.info(\"Converting conformers to density\")\n logger.debug(\"Masking\")\n self._transformer.reset(full=True)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self._transformer.mask(self._rmask)\n mask = self._transformer.xmap.array > 0\n self._transformer.reset(full=True)\n\n nvalues = mask.sum()\n self._target = self.xmap.array[mask]\n logger.debug(\"Density\")\n nmodels = len(self._coor_set)\n self._models = np.zeros((nmodels, nvalues), float)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self.conformer.b = self._bs[n]\n self._transformer.density()\n model = self._models[n]\n model[:] = self._transformer.xmap.array[mask]\n np.maximum(model, self.options.bulk_solvent_level, out=model)\n self._transformer.reset(full=True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
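A small example of `pmf2cdf`; the final division by `cdf[-1]` means an unnormalized input (e.g. raw counts) still yields a proper CDF (values illustrative):

```python
import numpy as np

pmf = np.array([2.0, 1.0, 1.0])  # unnormalized counts
print(pmf2cdf(pmf))              # [0.5  0.75 1.  ]
```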
Calculate stochastic matrix `pm` to the power of infinity, by finding the eigenvector which corresponds to the eigenvalue 1. | def inf_matrix_power(pm: np.ndarray, dtype=np.float64) -> np.ndarray:
w, v = np.linalg.eig(
pm
    ) # scipy.linalg.eig would probably be faster as it can return the left and right eigenvectors
if not np.isclose(w[0], 1.0):
raise ValueError("The first eigenvalue is not none. Is this a right stochastic matrix?")
vi = np.linalg.inv(v)
d = np.zeros(pm.shape[0], dtype=dtype)
d[0] = 1.0
return np.matmul(v, np.matmul(np.diag(d), vi)) | [
"def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0",
"def solve_kepler_eqn(M,e):\n try:\n M[0]\n res = np.zeros(M.shape)\n for i,Mi in enumerate(M):\n tmp,= optimize.fsolve(lambda x: x-e*np.sin(x) - Mi,Mi)\n res[i] = tmp\n except IndexError:\n res, = optimize.fsolve(lambda x: x - e*np.sin(x)-M,M)\n return res",
"def test_eigenvalues_of_too_few_points_results_in_0():\n a = np.array([5])\n pc = create_point_cloud(a, a, a)\n\n compute_features(pc, [[0]], pc, [\"eigenv_1\", \"eigenv_2\", \"eigenv_3\"], InfiniteCylinder(5))\n\n eigen_val_123 = np.array([pc[keys.point]['eigenv_{}'.format(i)]['data'] for i in [1, 2, 3]])\n assert not np.any(np.isnan(eigen_val_123))\n assert not np.any(np.isinf(eigen_val_123))",
"def solve(p):\n A = np.zeros((N, N))\n\n A[0, 0] = 2 + 2 * dx ** 2 * potential(0, p)\n A[0, 1] = -1\n\n A[-1, -1] = 2 + 2 * dx ** 2 * potential(N, p)\n A[-1, -2] = -1\n\n for i in range(1, N - 1):\n A[i, i - 1] = A[i, i + 1] = -1\n A[i, i] = 2 + 2 * dx ** 2 * potential(i, p)\n\n w, v = np.linalg.eig(A)\n eigenvectors = [v[:, i] for i in range(len(v))]\n w, eigenvectors = zip(*sorted(zip(w, eigenvectors), key=lambda x: x[0])) # sort by eigenvalues\n energies = [e / (2 * dx ** 2) for e in w]\n return energies, eigenvectors",
"def solve_kep_eqn(M,e):\n # Calculates eccentric anonaly E from mean anomaly M\n #try:\n #M[0]\n #res = np.zeros(M.shape)\n #for i,Mi in enumerate(l):\n #tmp,= fsolve(lambda E: E-e*np.sin(E) - Mi,Mi)\n #res[i] = tmp\n #except IndexError:\n #res, = fsolve(lambda E: E - e*np.sin(E)-M,M)\n res = fsolve(lambda E: E - e*np.sin(E)-M,M)\n return res",
"def free_body_eigen_problem(self,p):\n M = sym.eye(p.qs*2)\n M[-p.qs:,-p.qs:]=self.M\n\n K = sym.zeros(p.qs*2)\n K[:p.qs,-p.qs:] = sym.eye(p.qs)\n K[-p.qs:,:p.qs] = -self.f.jacobian(p.q)\n K[-p.qs:,-p.qs:] = -self.f.jacobian(p.qd)\n return K,M",
"def posdef_eig_self_adjoint(mat):\n evals, evecs = tf.self_adjoint_eig(mat)\n evals = tf.abs(evals) # Should be equivalent to svd approach.\n\n return evals, evecs",
"def check(mat, otp):\n prd = mat*otp\n eigval = prd[0]/otp[0]\n print 'computed eigenvalue :' , eigval\n [eigs, vecs] = np.linalg.eig(mat)\n abseigs = list(abs(eigs))\n ind = abseigs.index(max(abseigs))\n print ' largest eigenvalue :', eigs[ind]",
"def eigen(M):\n values, vectors = np.linalg.eig(M)\n return values, vectors",
"def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E",
"def Problem4(n):\n A = Problem2(n)\n eig = min(sl.eigs(A.asfptype(), which='SM')[0])\n \n print \"lamba*n^2 approaches pi^2 as n goes to infinity\"\n return eig*n**2",
"def stationary_distribution_from_eigenvector(T):\n val, L = eig(T, left=True, right=False)\n\n \"\"\" Sorted eigenvalues and left and right eigenvectors. \"\"\"\n perm=np.argsort(val)[::-1]\n\n val=val[perm]\n L=L[:,perm]\n \"\"\" Make sure that stationary distribution is non-negative and l1-normalized \"\"\"\n nu=np.abs(L[:,0])\n mu=nu/np.sum(nu)\n return mu",
"def initial_guess(M, e, k=0.85):\n return M + np.sign(np.sin(M)) * k * e",
"def test_eigenvalue_calculation(test_system):\n\n assert test_system.calculate_eigenvalues().min_damping < 100",
"def _calc_ev(self, M, k=None, herm=False):\n if self.verbosity >= 1:\n if k:\n print \"computing %d EVs (matrix size %s)...\"%(k, M.shape,)\n else:\n print \"computing EVs (matrix size %s)...\"%(M.shape,)\n import os\n utime0 = os.times()[0] # utime\n if k is None:\n if herm:\n # Assume hermitian matrix\n ev, evec = scipy.linalg.eigh(M)\n else:\n ev, evec = scipy.linalg.eig(M)\n else:\n # Calculate only 'k' eigenvectors. Only negative indexes\n # make sense in this case. LR=largest real part.\n def eigs_sparse(M, k):\n #M = M.astype(float)\n #return scipy.linalg.eig(M)\n try:\n return scipy.sparse.linalg.eigs(M, k=k, which='LR')\n except AttributeError:\n # Older scipy: should be removed once possible\n import scipy.sparse.linalg.eigen.arpack as arpack\n return arpack.eigen(M, k=k, which='LR')\n if herm:\n ev, evec = scipy.sparse.linalg.eigsh(M, k, which='LR')\n else:\n #ev, evec = scipy.sparse.linalg.eigs(M, k, which='LR')\n ev, evec = eigs_sparse(M, k)\n self.time_ev = os.times()[0]-utime0\n if self.verbosity >= 1:\n print \"done, took %f seconds.\"%(self.time_ev)\n return ev, evec",
"def current(edges, transition_matrix):\n ### Calculate the state frequecies ###\n # Eigenvalues and Eigenvectors of transition matrix\n vals, vl, vr = sp.linalg.eig(transition_matrix, left=True)\n # Find the eigenvalue that == 1\n index = list(vals).index(1)\n state_freq = vl[:,index]\n\n committor_plus = np.linalg.eig\n\n\n ### Calculate the flux matrix ###\n flux_matrix = np.multiply(transition_matrix, state_freq)\n return flux_matrix / flux_matrix.sum(axis=1)",
"def get_eigen_value(A, v):\n Av = np.dot(A, v)\n print(\"Mag v, should be 1:\", mag(v))\n lmb = mag(Av) / mag(v)\n return lmb",
"def test_em_nonlinear(self):\n z_matrix = np.array(\n [[0.00000000, 0.00000000, 0.00000000],\n [0.00000000, 0.00000000, 0.16666667],\n [0.03333333, 0.08333333, 0.00000000],\n [0.03333333, 0.08333333, 0.16666667],\n [0.06666667, 0.16666667, 0.00000000],\n [0.06666667, 0.16666667, 0.16666667],\n [0.10000000, 0.16666667, 0.00000000],\n [0.10000000, 0.16666667, 0.16666667],\n [0.13333333, 0.08333333, 0.00000000],\n [0.13333333, 0.08333333, 0.16666667],\n [0.16666667, 0.00000000, 0.00000000],\n [0.16666667, 0.00000000, 0.16666667]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"EM\")\n expected_w_vector = np.array(\n [0.20724531, 0.31710188, 0.47565280],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)",
"def find_vector(mat):\r\n eig = np.linalg.eig([[mat[1, 1], mat[1, 2]], [mat[2, 1], mat[2, 2]]])\r\n\r\n minimum = eig[0][0]\r\n index = 0\r\n for i in range(1, 2):\r\n if eig[0][i] < minimum:\r\n minimum = eig[0][i]\r\n index = i\r\n\r\n n = [0, eig[1][0][index], eig[1][1][index]]\r\n n = n / np.linalg.norm(n)\r\n\r\n return n"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace colored pixels with a `neutral_color`. The `ratio` defines the 'colorfulness' above which level the pixel should be replaced. I.e. if the `ratio` is 1 nothing will be replaced, if `ratio` is 0 only strict greys are kept unmodified. | def remove_color(img: np.ndarray, ratio: float, neutral_color: Tuple[int, int, int] = RGB_WHITE) -> None:
channels = img.shape[-1]
assert channels == 3, "Not a 3 channel color image"
norm = np.std(np.array(RGB_YELLOW)) # this is the same for all pure colors
sd = np.std(img, axis=-1)
img[sd > ratio * norm] = neutral_color | [
"def set_neutral_config(self, neutral_config_dict):\n self._neutral_config = neutral_config_dict",
"def set_neutral(self):\n print(\"Moving to neutral pose...\")\n self._right_arm.move_to_neutral(speed = 0.15)",
"def ratio_to_rgb(ratio):\n b = 0\n if round(ratio, 1) == 0.5:\n r = 255\n g = 255\n elif ratio < 0.5:\n r = int(ratio * 2 * 255.0)\n g = 255\n else:\n r = 255\n g = int((1.0 - ratio) * 2 * 255.0)\n rgb = (r, g, b)\n\n return rgb",
"def test_ratio_imprim(self, ratio):\n n_rots = 500\n weights = np.random.random(size=(1, n_rots))\n\n op = qml.RandomLayers(weights, wires=range(2), ratio_imprim=ratio)\n queue = op.decomposition()\n\n gate_names = [gate.name for gate in queue]\n ratio_impr = gate_names.count(\"CNOT\") / len(gate_names)\n assert np.isclose(ratio_impr, ratio, atol=0.05)",
"def simple_ratio(img, red, nir):\n simple_ratio = img[nir]/img[red]\n return simple_ratio[None,:,:]",
"def set_neutral(self):\n\t\tself._head.set_pan(0.0)",
"def ratio_inverted_or_not(self, inverse, ratio):\n # If ratio 0, bracket would return -inf, which makes no practical sense, so we just return ratio = 0\n if ratio > 0:\n # Get correct ratio bin according to inverse or not\n return self.transformer.iratio_bins[ratio] if inverse is True else self.transformer.ratio_bins[ratio]\n else:\n # This will never be traded, as win loss ratio is 0, meaning that win value is 0\n return ratio",
"def to_aspect_ratio_add_and_remove(image, target_ratio):\n height = image.shape[0]\n width = image.shape[1]\n ratio = width / height\n\n remove_top = 0\n remove_right = 0\n remove_bottom = 0\n remove_left = 0\n pad_top = 0\n pad_bottom = 0\n pad_left = 0\n pad_right = 0\n\n # loops here are inefficient, but easy to read\n i = 0\n if ratio < target_ratio:\n # vertical image, height > width\n while ratio < target_ratio:\n if i % 4 == 0:\n remove_top += 1\n height -= 1\n elif i % 4 == 2:\n remove_bottom += 1\n height -= 1\n elif i % 4 == 1:\n pad_right += 1\n width += 1\n else: # i % 4 == 3\n pad_left += 1\n width += 1\n ratio = width / height\n i += 1\n elif ratio > target_ratio:\n # horizontal image, width > height\n while ratio > target_ratio:\n if i % 4 == 0:\n remove_right += 1\n width -= 1\n elif i % 4 == 2:\n remove_left += 1\n width -= 1\n elif i % 4 == 1:\n pad_top += 1\n height += 1\n else: # i % 4 == 3\n pad_bottom += 1\n height += 1\n ratio = width / height\n i += 1\n\n # remove cols/rows\n if any([val > 0 for val in [remove_top, remove_right, remove_bottom, remove_left]]):\n image = image[remove_top:(height - remove_bottom), remove_left:(width - remove_right), ...]\n\n # add cols/rows (black)\n if any([val > 0 for val in [pad_top, pad_bottom, pad_left, pad_right]]):\n image = np.pad(image, ((pad_top, pad_bottom), \\\n (pad_left, pad_right), \\\n (0, 0)), \\\n mode=\"constant\")\n\n return image",
"def removecolor(src, hue, tolerance):\n imgThresholded = mapcolor(src, hue, tolerance)\n imgEdited = cv2.cvtColor(imgThresholded, cv2.COLOR_GRAY2BGR)\n imgGray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n imgGray = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)\n imgGray = cv2.bitwise_and(imgGray, imgEdited)\n imgEdited = cv2.bitwise_and(src, imgEdited)\n imgEdited = src - imgEdited + imgGray\n return imgEdited",
"def flag_heterozygosity_ratio(cls, heterozygosity_ratio, **kwargs):\n heterozygosity_ratio = float(heterozygosity_ratio)\n result = cls.assign_flag(\n heterozygosity_ratio,\n warn_upper=cls.HETEROZYGOSITY_WARN_UPPER,\n warn_lower=cls.HETEROZYGOSITY_WARN_LOWER,\n )\n return result",
"def _hv_neutral():\n return hv.Curve([])",
"def set_slide_neutral(self):\n print(\"Moving to neutral pose...\")\n joint_positions = deepcopy(self.neutral_joint_positions)\n\n joint_positions['right_j5'] = joint_positions['right_j5'] - np.pi / 2.\n self._right_arm.move_to_joint_positions(joint_positions)",
"def src_set_ratio(state, new_ratio):\n return _lib.src_set_ratio(state, new_ratio) if state else None",
"def signal_to_noise_ratio(self, signal_to_noise_ratio):\n\n self._signal_to_noise_ratio = signal_to_noise_ratio",
"def _update_classification_localization_weight_ratio(configs, ratio):\n meta_architecture = configs[\"model\"].WhichOneof(\"model\")\n if meta_architecture == \"faster_rcnn\":\n model = configs[\"model\"].faster_rcnn\n model.first_stage_localization_loss_weight = 1.0\n model.first_stage_objectness_loss_weight = ratio\n model.second_stage_localization_loss_weight = 1.0\n model.second_stage_classification_loss_weight = ratio\n if meta_architecture == \"ssd\":\n model = configs[\"model\"].ssd\n model.loss.localization_weight = 1.0\n model.loss.classification_weight = ratio",
"def color_match(dominant: np.ndarray, threshold: int = 30) -> str:\n output = \"other\"\n\n img_color = rgb2lab(np.uint8(np.asarray([[dominant]])))\n red = rgb2lab(np.uint8(np.asarray([[[255, 0, 0 + 9]]])))\n green = rgb2lab(np.uint8(np.asarray([[[0, 128, 0]]])))\n blue = rgb2lab(np.uint8(np.asarray([[[0, 0, 255]]])))\n\n if deltaE_cie76(red, img_color) < threshold:\n output = \"red\"\n elif deltaE_cie76(green, img_color) < threshold:\n output = \"green\"\n elif deltaE_cie76(blue, img_color) < threshold:\n output = \"blue\"\n else:\n pass\n return output",
"def undersample_majority(df, ratio=1.0, random_state=3):\n count_class_0, count_class_1 = df[\"Status\"].value_counts()\n df_class_0 = df[df[\"Status\"] == \"paid\"]\n df_class_1 = df[df[\"Status\"] == \"defaulted\"]\n # print(count_class_0)\n # print(count_class_1)\n df_class_0_under = df_class_0.sample(\n int(ratio * count_class_1), random_state=random_state\n )\n df_train_under = pd.concat([df_class_0_under, df_class_1], axis=0)\n # print(df_train_under['Status'].value_counts)\n return df_train_under",
"def ratio1(self, ratio):\n if (ratio < Decimal('0.00')) and (ratio > Decimal(\"100.00\")):\n raise ValueError('Ratio must be >= to 0 and <= than 100')\n self._ratio1 = ratio",
"def ratio(self, ratio):\n\n self._ratio = ratio"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
np.broadcast_shapes requires `numpy>=1.20.0`, which is not available for `python < 3.7`. | def broadcast_shapes(*shapes: Tuple[int, ...]) -> Tuple[int, ...]:
arrays = [np.empty(shape) for shape in shapes]
return np.broadcast(*arrays).shape | [
"def _broadcastShape(shape1, shape2):\n\n shape1, shape2, broadcastshape = _broadcastShapes(shape1, shape2)\n return broadcastshape",
"def broadcast_shape(*shapes, **kwargs):\n strict = kwargs.pop(\"strict\", False)\n reversed_shape = []\n for shape in shapes:\n for i, size in enumerate(reversed(shape)):\n if i >= len(reversed_shape):\n reversed_shape.append(size)\n elif reversed_shape[i] == 1 and not strict:\n reversed_shape[i] = size\n elif reversed_shape[i] != size and (size != 1 or strict):\n raise ValueError(\n \"shape mismatch: objects cannot be broadcast to a single shape: {}\".format(\n \" vs \".join(map(str, shapes))\n )\n )\n return tuple(reversed(reversed_shape))",
"def broadcasted_shape_from_arrays(*arrays):\n\n shapes = [np.shape(array) for array in arrays]\n return broadcasted_shape(*shapes)",
"def broadcasted_shape(*shapes):\n dim = 0\n for a in shapes:\n dim = max(dim, len(a))\n S = ()\n for i in range(-dim,0):\n s = 1\n for a in shapes:\n if -i <= len(a):\n if s == 1:\n s = a[i]\n elif a[i] != 1 and a[i] != s:\n raise ValueError(\"Shapes %s do not broadcast\" % (shapes,))\n S = S + (s,)\n return S",
"def _broadcastShapes(shape1, shape2):\n\n if len(shape1) > len(shape2):\n shape2 = (1,) * (len(shape1) - len(shape2)) + shape2\n elif len(shape1) < len(shape2):\n shape1 = (1,) * (len(shape2) - len(shape1)) + shape1\n\n def maxzero(s, o):\n if s == 0 or o == 0:\n return 0\n else:\n return py_max(s, o)\n\n if logical_and.reduce([(s == o or s == 1 or o == 1) for s, o in zip(shape1, shape2)]):\n broadcastshape = tuple([maxzero(s, o) for s, o in zip(shape1, shape2)])\n else:\n broadcastshape = None\n\n return (shape1, shape2, broadcastshape)",
"def _mul_broadcast_shape(shape_a, shape_b):\n bc_shape = []\n len_a, len_b = len(shape_a), len(shape_b)\n for i in range(1, 1 + min(len_a, len_b)):\n s_a, s_b = shape_a[-i], shape_b[-i]\n if s_a != s_b:\n if min(s_a, s_b) > 1:\n raise RuntimeError(\"batch sizes not broadcastable\")\n bc_shape.insert(0, max(s_a, s_b))\n else:\n bc_shape.insert(0, s_a)\n\n # fill remaining dimensions on the left if necessary\n delta_len = len_a - len_b\n if delta_len > 0:\n bc_shape = list(shape_a[:delta_len]) + bc_shape\n elif delta_len < 0:\n bc_shape = list(shape_b[:-delta_len]) + bc_shape\n\n return torch.Size(bc_shape)",
"def test_broadcast_tensors():\n datatype = [np.bool8, np.int32, np.int64, np.float32, np.float64]\n for i in datatype:\n x = randtool(\"float\", -5, 5, [1, 6, 2, 6, 1]).astype(i)\n y = randtool(\"float\", -5, 5, [1, 6, 1, 1, 3]).astype(i)\n z = randtool(\"float\", -5, 5, [1, 1, 1, 6, 3]).astype(i)\n s = randtool(\"float\", -5, 5, [1, 6, 1, 6, 3]).astype(i)\n\n res = np.broadcast_arrays(x, y, z, s)\n paddle_res = paddle.broadcast_tensors(input=[\n paddle.to_tensor(x), paddle.to_tensor(y), paddle.to_tensor(z),\n paddle.to_tensor(s)\n ])\n paddle_res_numpy = []\n for i in paddle_res:\n paddle_res_numpy.append(i.numpy())\n assert np.allclose(res, paddle_res_numpy)",
"def shape_to_broadcast(shape):\n return tuple(n==1 for n in shape)",
"def broadcast_to(array, shape):\n if _np.isscalar(array):\n return full(shape, array)\n return _npi.broadcast_to(array, shape)",
"def generalized_broadcast(arrays):\n arrays1 = np.broadcast_arrays(*[A[..., 0] for A in arrays])\n shapes_b = [A1.shape + (A.shape[-1],) for A1, A in zip(arrays1, arrays)]\n strides_b = [A1.strides + (A.strides[-1],) for A1, A in zip(arrays1, arrays)]\n arrays_b = [as_strided(A, shape=shape_Ab, strides=strides_Ab)\n for A, shape_Ab, strides_Ab in zip(arrays, shapes_b, strides_b)]\n return arrays_b",
"def testBroadcastDimension(self, axis, row_length, original_dim_sizes,\n broadcast_dim_sizes):\n original_shape = RaggedTensorDynamicShape.from_dim_sizes(original_dim_sizes)\n bcast_shape = RaggedTensorDynamicShape.from_dim_sizes(broadcast_dim_sizes)\n self.assertEqual(original_shape.rank, bcast_shape.rank)\n # shape[axis].value == 1 and row_length > 1:\n bcast1 = original_shape.broadcast_dimension(axis, row_length)\n # shape[axis].value > 1 and row_length == shape[axis].value:\n bcast2 = bcast_shape.broadcast_dimension(axis, row_length)\n # shape[axis].value > 1 and row_length == 1:\n bcast3 = bcast_shape.broadcast_dimension(axis, 1)\n\n self.assertShapeEq(bcast1, bcast_shape)\n self.assertShapeEq(bcast2, bcast_shape)\n self.assertShapeEq(bcast3, bcast_shape)",
"def _broadcast_to_shape(x, shape):\n ndim_to = len(shape)\n x = _expand(x, ndim_to)\n return _broadcast_to(x, F.shape(x), shape, ndim_to)",
"def broadcast_to(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = BroadcastTo(shape).apply((x,))\n return y",
"def broadcast(broadcast_shape, x):\n if F.shape(x) == broadcast_shape:\n return x\n multiples = const_utils.compute_multiples(F.shape(x), broadcast_shape)\n if multiples:\n return F.tile(x, multiples)\n return x",
"def explicit_broadcasting(input_value: np.array, target_shape: np.array, axes_mapping: np.array) -> np.array:\n res_shape, normalized_axes_mapping = explicit_shape_broadcasting(input_value.shape, target_shape, axes_mapping)\n #TODO: Function 'expand_dims' should be replaced with 'numpy.expand_dims' if numpy version will be >=18.x in requirements.\n expand_dim_axis = set(np.arange(len(target_shape))) - set(normalized_axes_mapping)\n input_expanded = input_value.copy()\n \n for axis in sorted(list(expand_dim_axis)):\n input_expanded = np.expand_dims(input_expanded, axis)\n return np.broadcast_to(input_expanded, res_shape)",
"def can_broadcast(shape1, shape2) -> bool:\n return(\n reduce(\n lambda a, b: a and b,\n starmap(\n lambda a, b: (a == b or (a == 1 or b == 1)),\n zip_longest(shape1, shape2, fillvalue=1)\n )\n )\n )",
"def tt_broadcast_arrays(*args: TensorVariable):\n bcast_shape = broadcast_shape(*args)\n return tuple(at_broadcast_to(a, bcast_shape) for a in args)",
"def explicit_shape_broadcasting(input_shape: np.array, target_shape: np.array, axes_mapping: np.array) -> [np.array, np.array]:\n assert np.all(np.diff(axes_mapping) >= 0), \"axes_mapping is not sorted\"\n assert len(axes_mapping) == len(input_shape), \"size of axes_mapping does not match to rank of input\"\n axes_mapping = mo_array(list(map(lambda axis: axis + len(target_shape) if axis < 0 else axis, axes_mapping)))\n\n res = target_shape.copy()\n for i, axis in enumerate(axes_mapping):\n assert 0 <= axis < len(res), \"axis value from axes_mapping exceeds rank of target_shape\"\n assert res[axis] == input_shape[i], \"specified mapping axis in target_shape differs from axis in input_shape\"\n return res, axes_mapping",
"def broadcast_rule(shape_a, shape_b):\n assert(isinstance(shape_a, tuple))\n assert(isinstance(shape_b, tuple))\n if len(shape_a) > len(shape_b):\n longer_shape, shorter_shape = shape_a, shape_b\n else:\n longer_shape, shorter_shape = shape_b, shape_a\n len_diff = len(longer_shape) - len(shorter_shape)\n for i in range(len_diff):\n # pad with leading 1s\n shorter_shape = (1,) + shorter_shape\n assert len(shorter_shape) == len(longer_shape)\n output_shape = list(longer_shape)\n for i in range(len(output_shape)):\n assert (shorter_shape[i] == longer_shape[i]) \\\n or (shorter_shape[i] == 1) \\\n or (longer_shape[i] == 1)\n output_shape[i] = max(shorter_shape[i], longer_shape[i])\n return tuple(output_shape)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Batched center of mass calculation of 2d arrays | def center_of_mass_2d(arr: np.ndarray, dtype=np.float32) -> np.ndarray:
total = np.sum(arr, axis=(-1, -2))
grids = np.ogrid[[slice(0, i) for i in arr.shape[-2:]]]
with np.errstate(invalid="ignore"):
results = np.array([np.sum(arr * grid.astype(dtype), axis=(-1, -2)) / total for grid in grids], dtype=dtype)
results = np.moveaxis(results, 0, -1)
return results | [
"def centerOfMass(data):\r\n dd = []\r\n for d in data:\r\n dd.append(d.coordinate)\r\n\r\n data = dd\r\n data = np.array(data)\r\n n = len(data)\r\n x = sum(data[:,0])\r\n y = sum(data[:,1])\r\n z = sum(data[:,2])\r\n x/=n\r\n y/=n\r\n z/=n\r\n return x,y,z,n",
"def center_of_mass(gt_images):\r\n # get a list of centers of mass from a list of images\r\n centers = []\r\n for i in range(len(gt_images)):\r\n sumy = 0\r\n sumx = 0\r\n div = len(gt_images[i])\r\n\r\n for j in range(len(gt_images[i])):\r\n im = gt_images[i][j]\r\n center=ndimage.measurements.center_of_mass(im)\r\n if np.isnan(center[0]) == False or np.isnan(center[1]) == False:\r\n sumy = sumy + center[0]\r\n sumx = sumx + center[1]\r\n\r\n else:\r\n div = div - 1\r\n\r\n centers.append((np.ceil(sumy/div),np.ceil(sumx/div)))\r\n return centers",
"def center_of_mass(im_binary, x_offset=0, y_offset=0):\n n = np.sum(im_binary)\n\n x = np.arange(im_binary.shape[1]) + x_offset\n y = np.arange(im_binary.shape[0]) + y_offset\n xv, yv = np.meshgrid(x, y)\n cx = np.sum(xv[im_binary]) / n\n cy = np.sum(yv[im_binary]) / n\n\n return cx, cy",
"def center_of_mass(particle_in_cell, num, mass, position):\r\n result = []\r\n total_mass = []\r\n #Initialise the output lists\r\n position_x = position[0,:]\r\n position_y = position[1,:]\r\n position_z = position[2,:]\r\n \r\n for i in range(len(particle_in_cell)):\r\n COM_x = 0.0\r\n COM_y = 0.0\r\n COM_z = 0.0\r\n M_total = 0.0\r\n #Initialise the center of mass position and the total mass of particles in the grid\r\n for j in range(num[i]):\r\n COM_x += mass[particle_in_cell[i][j]]*position_x[particle_in_cell[i][j]]\r\n COM_y += mass[particle_in_cell[i][j]]*position_y[particle_in_cell[i][j]]\r\n COM_z += mass[particle_in_cell[i][j]]*position_z[particle_in_cell[i][j]]\r\n M_total += mass[particle_in_cell[i][j]]\r\n #Calculate the center off mass\r\n result.append(np.array([COM_x/M_total, COM_y/M_total, COM_z/M_total]))\r\n total_mass.append(M_total)\r\n return result, total_mass",
"def compute_center_of_mass(traj):\n\n com = np.zeros((traj.n_frames, 3))\n masses = np.array([a.element.mass for a in traj.top.atoms])\n masses /= masses.sum()\n\n for i, x in enumerate(traj.xyz):\n com[i, :] = x.astype('float64').T.dot(masses)\n return com",
"def center_of_mass_chunk(chunk_coords, black_mask, width, height):\n sum_coords = (0,0)\n black_list = list_black_in_chunk(chunk_coords, black_mask, width, height)\n for coords in black_list:\n sum_coords = (sum_coords[0] + coords[0], sum_coords[1] + coords[1])\n return (round(sum_coords[0]/len(black_list)), round(sum_coords[1]/len(black_list)))",
"def center_of_mass(row):\n size = np.size(row)\n value = 0.0\n for j in range(0, size):\n value = value + row[j]*(j+1)\n value = value/np.sum(row)\n return value",
"def center_of_mass(img):\n return ndi.center_of_mass(img)",
"def center_of_mass(self):\n masses = cctk.OneIndexedArray([get_avg_mass(z) for z in self.atomic_numbers]).reshape(-1,1)\n return np.sum(masses * self.geometry, axis=0) / np.sum(masses)",
"def get_center_of_mass_allies(self,obs):",
"def centerofmass(self ):\n #\n # Intialize center of mass list \n cent_mass = np.zeros( self.n_dim)\n total_mass = 0.0\n #\n for pkey_i in self.pkeys:\n particle_i = self.strucC.particles[pkey_i]\n r_i = self.strucC.positions[pkey_i]\n mass_i = particle_i.mass\n total_mass += mass_i\n for dim in range(self.n_dim):\n cent_mass[dim] += mass_i*r_i[dim]\n # Normalize\n for dim in range(self.n_dim):\n cent_mass[dim] = cent_mass[dim]/total_mass\n #\n self._property['cent_mass'] = cent_mass\n self._property['total_mass'] = total_mass\n\n return",
"def calculate_centers_of_mass(x_all, y_all):\n num_of_frames, num_of_rafts = x_all.shape\n\n x_centers = x_all[:, 0:num_of_rafts].mean(axis=1)\n y_centers = y_all[:, 0:num_of_rafts].mean(axis=1)\n\n x_relative_to_centers = x_all - x_centers[:, np.newaxis]\n y_relative_to_centers = y_all - y_centers[:, np.newaxis]\n\n distances_to_centers = np.sqrt(x_relative_to_centers ** 2 + y_relative_to_centers ** 2)\n\n orbiting_angles = np.arctan2(y_relative_to_centers, x_relative_to_centers) * 180 / np.pi\n\n return distances_to_centers, orbiting_angles, x_centers, y_centers",
"def center_of_mass(masks: Tensor, eps: float = 1e-7) -> Tensor:\n n, h, w = masks.shape\n grid_h = torch.arange(h, device=masks.device)[:, None]\n grid_w = torch.arange(w, device=masks.device)\n normalizer = masks.sum(dim=(1, 2)).float().clamp(min=eps)\n center_y = (masks * grid_h).sum(dim=(1, 2)) / normalizer\n center_x = (masks * grid_w).sum(dim=(1, 2)) / normalizer\n center = torch.cat([center_x[:, None], center_y[:, None]], dim=1)\n return center",
"def centre_of_mass(image, black_blob=False):\r\n image = image.copy()\r\n shape = image.shape\r\n if black_blob:\r\n image = 255-image\r\n centre = np.array([0, 0]).astype(float)\r\n\r\n #------------------------------START YOUR CODE-----------------------------#\r\n s = np.sum(image)\r\n indices = np.mgrid[0:image.shape[0],0:image.shape[1]]\r\n ys = np.sum(indices[0]*image)\r\n xs = np.sum(indices[1]*image)\r\n\r\n # Equivalent, but slower\r\n #xs = 0.0\r\n #ys = 0.0\r\n #s = 0.0 \r\n #for y in range(shape[0]):\r\n # for x in range(shape[1]):\r\n # p = image[y, x]\r\n # xs += x*p\r\n # ys += y*p\r\n # s += p\r\n\r\n centre = np.array([ ys/s, xs/s ])\r\n #-------------------------------END YOUR CODE------------------------------#\r\n return centre.astype(int)",
"def mass_center(model, sim):\r\n mass = np.expand_dims(model.body_mass, 1)\r\n xpos = sim.data.xipos\r\n return (np.sum(mass * xpos, 0) / np.sum(mass))[0]",
"def calcCenterOfMass(atoms):\n M = 0.0\n s = np.zeros((3)) \n for a in atoms:\n s += a.mass * a.position \n M += a.mass\n s /= M\n return s",
"def center_of_mass(self):\n weights = [s.species.weight for s in self]\n center_of_mass = np.average(self.frac_coords,\n weights=weights, axis=0)\n return center_of_mass",
"def center(X):\n \n n,m = X.shape\n if n != m:\n raise Exception('Matrix is not square.')\n \n colsum = X.sum(axis=0) / n\n rowsum = X.sum(axis=1) / n\n totalsum = X.sum() / (n**2)\n \n #center\n Y = array([[ X[i,j]-rowsum[i]-colsum[j]+totalsum for i in range(n) ] for j in range(n)])\n \n return Y",
"def center_of_mass(particles):\n\n masses = particles.mass\n position=particles.position\n total_mass = masses.sum()\n return (position * masses.reshape((len(masses),1))).sum(0) / total_mass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
validate_target verifies that target is a valid MAC address, IP address or hostname | def validate_target(target, arp_table):
try:
mac = mac_address(target)
return mac
except TypeError:
pass
try:
ip = ip_address(target)
if ip in arp_table.keys():
return arp_table[ip].mac
except TypeError:
pass
if target in arp_table:
return arp_table[target].mac
else:
raise TypeError('{} is not a valid target'.format(target)) | [
"def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True",
"def _is_valid_target(self, target, target_name, target_ports, is_pair):\n if is_pair:\n return (target[:utils.PORT_ID_LENGTH] in target_ports and\n target_name == self._PAIR_TARGET_NAME)\n return (target[:utils.PORT_ID_LENGTH] in target_ports and\n target_name.startswith(self.driver_info['target_prefix']) and\n target_name != self._PAIR_TARGET_NAME)",
"def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()",
"def target_is_valid(self, target_id=0):\n try:\n target = self.target(target_id=target_id)\n except:\n return False\n return target['state'] != \"invalid\"",
"def is_valid_target_of(self, node, source, name):\r\n return self._send({'name': 'isValidTargetOf', 'args': [node, source, name]})",
"def verify_as_host(self, target, message_handler):\n\n # Check we can host the target.\n if not self.supported_target(target, message_handler):\n raise UserException(\n \"{0} is not a supported {1} development host\".format(\n self.name, target.name))",
"def is_valid_hostname(target: str) -> bool:\n # Regex from https://www.regextester.com/99895\n # host_regex = r\"^(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?|^((http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\"\n host_regex = r\"^(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?|^((http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.)$\"\n\n pattern = re.compile(host_regex, re.I)\n\n if pattern.search(target):\n return True\n return False",
"def target_validation(target_name, action):\n json_data = read_file('presqt/specs/targets.json', True)\n for data in json_data:\n if data['name'] == target_name:\n if data[\"supported_actions\"][action] is False:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not support the action '{}'.\".format(target_name, action),\n status.HTTP_400_BAD_REQUEST)\n return True, data['infinite_depth']\n else:\n raise PresQTValidationError(\n \"PresQT Error: '{}' is not a valid Target name.\".format(target_name), status.HTTP_404_NOT_FOUND)",
"def test_target_resembles_ip(self):\n for fqdn in ('10.234.30.253', '128.193.0.3', 'fe80::e1c9:1:228d:d8'):\n with self.assertRaises(ValidationError):\n self.create_ptr(ip_str='128.193.0.2', fqdn=fqdn,\n ip_type='4')",
"def _validator_target(self, field, value):\n if not REG.match(value):\n self._error(field, \"{} is not a valid target\".format(value))",
"def valid_target(blockDataHash, dateCreated, nonce, target):\t\t\r\n\t\tcoefficient = int(hex(target)[-6:], 16)\r\n\t\texponent = int(hex(target)[:-6], 16)\r\n\t\ttargetValue = coefficient * (2 ** (8*(exponent-3)))\r\n\t\tt = blockDataHash + \"|\" + str(dateCreated) + \"|\" + nonce\r\n\t\tminedBlockHash = hashlib.sha256(t.encode(\"utf8\")).hexdigest()\r\n\t\t\r\n\t\tif (int(minedBlockHash,16) < targetValue):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False",
"def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')",
"def checkTargetSupported(self,target) -> bool:\n if target in self.getSupportedTargets():#checks the target is in the list of supported targets\n return True\n else:\n return False",
"def supported_target(self, target, message_handler):\n\n # iOS can never be a host.\n return False",
"def validate_target(data, handshake):\n\n if data['header'] != handshake:\n END_POINT({\n 'status': 'invalid-handshake',\n 'handshake': handshake,\n })\n comment('handshake: %r' % data['header'])\n\n # Import all requested modules\n for mod in data.get('imports', ()):\n importlib.import_module('boxed')\n try:\n importlib.import_module(mod)\n except ImportError:\n END_POINT({\n 'status': 'invalid-import',\n 'module': mod,\n })\n comment('all modules successfully imported')\n\n # If the target attribute is a callable, simply return it\n target = data['target']\n if callable(target):\n return target\n\n # If it is a path string, we load the proper target function in the given\n # location.\n mod, _, func = data['target'].rpartition('.')\n try:\n mod = importlib.import_module(mod)\n target = getattr(mod, func)\n except ImportError as ex:\n END_POINT({\n 'status': 'invalid-target',\n 'message':\n 'could not import module %r. Maybe it must be passed it to '\n 'the \"imports\" argument.' % mod,\n })\n except AttributeError:\n END_POINT({\n 'status': 'invalid-target',\n 'message':\n 'could not find function \"%s\" in module %s' % (func, mod),\n })\n comment('target function loaded as %s' % funcname(target))\n return target",
"def valid_target(start, target, words):\r\n if target.isalpha(): # target word must be alphabetic\r\n if len(start) == len(target): # target word must be same size as start word\r\n if start != target: # target and start words must be different\r\n if target in words: # target word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Target word not in list of words....please reenter\"\r\n else:\r\n return \"Target word must be different from Start word....please reenter\"\r\n else:\r\n return \"Target word must be same length as Start word....please reenter\"\r\n else:\r\n return \"Target word must contain only letters....please reenter\"",
"def host_is_target(target):\n return host_arch_target() == target_arch(target)",
"def validate_host():\n return opTestSys.sys_bmc_power_on_validate_host()",
"def transfer_target_validation(source_target, destination_target):\n json_data = read_file('presqt/specs/targets.json', True)\n\n for data in json_data:\n if data['name'] == source_target:\n if destination_target not in data['supported_transfer_partners']['transfer_out']:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not allow transfer to '{}'.\".format(\n source_target, destination_target),\n status.HTTP_400_BAD_REQUEST)\n\n elif data['name'] == destination_target:\n if source_target not in data['supported_transfer_partners']['transfer_in']:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not allow transfer from '{}'.\".format(\n destination_target, source_target),\n status.HTTP_400_BAD_REQUEST)\n\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
[authorize and initialize spotify client] | def init_auth_client(self):
with open("config.yml", 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)
token = util.prompt_for_user_token(
cfg['username'],
scope=cfg['scope'],
client_id=cfg['spotipy_client_id'],
client_secret=cfg['spotipy_client_secret'],
redirect_uri=cfg['spotipy_redirect_uri'])
sp = spotipy.Spotify(auth=token)
return sp, cfg['username'] | [
"def init_client(self):\n \n creds = sc.load_creds()\n client_id = creds['client_id']\n client_secret = creds['client_secret']\n client_credentials_manager = SpotifyClientCredentials(client_id=client_id,\n client_secret=client_secret)\n self.sp = spotipy.Spotify(client_credentials_manager=\n client_credentials_manager)",
"def init_user(self) -> Any:\n return \\\n spotipy.Spotify(auth_manager=spotipy.oauth2.SpotifyOAuth(scope=\"playlist-modify-public\",\n client_id=self._public_id, client_secret=self._secret_id,\n redirect_uri=self._redirect_uri))",
"def make_client():\n\n scope = 'streaming user-read-playback-state user-read-currently-playing'\n token, _ = util.prompt_for_user_token('spotipy_user', scope)\n spotify_client = Spotify(auth=token)\n return spotify_client",
"def authenticate_spotify_api(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET):\r\n auth_manager = SpotifyClientCredentials(client_id = SPOTIPY_CLIENT_ID, \r\n client_secret=SPOTIPY_CLIENT_SECRET)\r\n \r\n return spotipy.Spotify(auth_manager=auth_manager)",
"def auth(self):\n token = spotipy.util.prompt_for_user_token(self.username,\n self.scope,\n client_id = self.client_id,\n client_secret = self.client_secret,\n redirect_uri= self.redirect_uri)\n if token:\n self.spotify = spotipy.Spotify(auth=token)\n else:\n print(colored.stylize(\"\"\"\\n[*] \"\"\", colored.fg(\"light_red\")) + 'Cant get token for: %s\\n' % (self.username))\n exit()",
"def get_auth(config:dict) -> sp.SpotifyOAuth:\n return sp.SpotifyOAuth(\n client_id=config[\"CLIENT_ID\"],\n client_secret=config[\"CLIENT_SECRET\"],\n redirect_uri=config[\"REDIRECT_URI\"],\n scope=SCOPE,\n )",
"def Connect(self,scope):\n\n \"\"\"\n Calling util.prompt_for_user_token will open Spotify’s application authorization\n page in your browser (and require you to log in if you are not already logged in\n to spotify.com), unless a locally cached access token exist from a previous authorization/authentication.\n \"\"\"\n try:\n token = util.prompt_for_user_token(\n self.username,\n scope,\n self.client_id,\n self.secret_id,\n self.redirect_uri)\n except ImportError:\n self._isConnected = False\n print(\" onnecting to Spotify failed\") \n\n\n if token:\n sp = spotipy.Spotify(auth=token)\n self._isConnected = True\n return sp\n else:\n print(\"Can't get token for\", self.username)\n self._isConnected = False",
"def connect(cls, creds: SpotifyClientCredentials):\n cls._creds = creds\n cls._spotify_api = _Spotify(client_credentials_manager=creds)",
"def create_spot_oauth():\n\n return SpotifyOAuth(\n client_id=SPOT_CLIENT_ID, \n client_secret=SPOT_CLIENT_SECRET, \n redirect_uri=url_for(\"spot.spot_oauth2callback\", _external=True),\n scope=spot_scope)",
"def authorize():\n scopes = 'playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative user-read-email user-read-private'\n\n spotify_authorize_url = 'https://accounts.spotify.com/authorize?'\n params = {\n 'response_type': 'code', \n 'client_id': SPOTIFY_CLIENT_ID,\n 'redirect_uri': 'http://0.0.0.0:5000/callback',\n 'scope': scopes, \n 'show_dialog': True\n }\n\n query_params = urllib.parse.urlencode(params)\n response = make_response(redirect(spotify_authorize_url + query_params))\n return response",
"def _get_spotify_client(token):\n\n if not token:\n raise Exception(\"No token\")\n\n return spotipy.Spotify(auth=token)",
"def __createSpotifyObject(self):\n \n # Defining the scope(s) of the application\n scope = \"playlist-modify-public playlist-modify-private user-read-currently-playing\"\n\n # Getting the token\n token = util.prompt_for_user_token(username=USER, scope=scope, client_id=CLIENT_ID, client_secret=CLIENT_SECRET, redirect_uri=\"https://google.com/\")\n\n # Returning our spotify object\n return spotipy.Spotify(auth=token)",
"def async_setup(hass, config):\n import spotipy.oauth2\n import json\n global AIS_SPOTIFY_TOKEN\n\n try:\n ws_resp = aisCloud.key(\"spotify_oauth\")\n json_ws_resp = ws_resp.json()\n spotify_redirect_url = json_ws_resp[\"SPOTIFY_REDIRECT_URL\"]\n spotify_client_id = json_ws_resp[\"SPOTIFY_CLIENT_ID\"]\n spotify_client_secret = json_ws_resp[\"SPOTIFY_CLIENT_SECRET\"]\n spotify_scope = json_ws_resp[\"SPOTIFY_SCOPE\"]\n try:\n ws_resp = aisCloud.key(\"spotify_token\")\n key = ws_resp.json()[\"key\"]\n AIS_SPOTIFY_TOKEN = json.loads(key)\n except:\n AIS_SPOTIFY_TOKEN = None\n _LOGGER.info(\"No AIS_SPOTIFY_TOKEN\")\n except Exception as e:\n _LOGGER.error(\"No spotify oauth info: \" + str(e))\n return False\n\n cache = hass.config.path(DEFAULT_CACHE_PATH)\n gate_id = ais_global.get_sercure_android_id_dom()\n oauth = spotipy.oauth2.SpotifyOAuth(spotify_client_id, spotify_client_secret, spotify_redirect_url,\n scope=spotify_scope, cache_path=cache, state=gate_id)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token in cache;\")\n if AIS_SPOTIFY_TOKEN is not None:\n with open(cache, 'w') as outfile:\n json.dump(AIS_SPOTIFY_TOKEN, outfile)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token; run configurator\")\n async_request_configuration(hass, config, oauth)\n return True\n\n if hass.data.get(DOMAIN):\n configurator = hass.components.configurator\n configurator.request_done(hass.data.get(DOMAIN))\n del hass.data[DOMAIN]\n\n # register services\n data = hass.data[DOMAIN] = SpotifyData(hass, oauth)\n\n # service = configured_service(hass)\n\n @asyncio.coroutine\n def search(call):\n _LOGGER.info(\"search \" + str(call))\n yield from data.process_search_async(call)\n\n def select_track_name(call):\n _LOGGER.info(\"select_track_name\")\n data.process_select_track_name(call)\n\n def change_serive(call):\n _LOGGER.info(\"change_serive\")\n data.change_serive(call)\n\n hass.services.async_register(DOMAIN, 'search', search)\n hass.services.async_register(DOMAIN, 'select_track_name', select_track_name)\n hass.services.async_register(DOMAIN, 'change_serive', change_serive)\n\n return True",
"def authorize(self):\n\t\ttry:\n\t\t\tauth_url = 'https://accounts.spotify.com/api/token'\n\t\t\theaders={}\n\t\t\tdata={}\n\n\t\t\tdata_string = f\"{self.client_id}:{self.client_secret}\"\n\n\t\t\tdata_bytes = data_string.encode(\"ascii\")\n\t\t\tbase_bytes = base64.b64encode(data_bytes)\n\t\t\tbase_message = base_bytes.decode(\"ascii\")\n\n\t\t\theaders['Authorization'] = f\"Basic {base_message}\"\n\n\t\t\tdata = parse.urlencode({\"grant_type\": \"client_credentials\"})\n\t\t\tdata = data.encode('ascii')\n\n\t\t\treq = request.Request(auth_url,data=data, headers=headers)\n\t\t\tlogging.info(\"Successfully called Spotify token API!\")\n\t\texcept:\n\t\t\tlogging.error(\"Failed to create authorization request!\")\n\t\t\treturn False\n\t\t\t\n\t\tif req is not None:\n\t\t\ttry:\n\t\t\t\tresponse = request.urlopen(req).read().decode()\n\t\t\texcept error.URLError as e:\n\t\t\t\tresponse = e.read().decode(\"utf8\", 'ignore')\n\t\t\t\tlogging.error(response)\n\t\t\t\treturn False\n\t\t\n\t\ttry:\n\t\t\t_json = json.loads(response)\n\t\t\tself.token = _json[\"access_token\"]\n\t\t\tlogging.info(\"Successfully received token from Spotify!\")\n\t\texcept:\n\t\t\tlogging.error(\"Could not fetch token from response!\")\n\t\t\treturn False\n\t\t\t\n\t\treturn True",
"def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}",
"def requestAuth():\n endpoint = \"https://accounts.spotify.com/authorize\"\n payload = {\n \"client_id\": CLIENT_ID,\n \"response_type\": \"code\",\n \"redirect_uri\": \"https://frozen-fjord-48065.herokuapp.com/callback\",\n # \"state\": \"sdfdskjfhkdshfkj\",\n \"scope\": \"playlist-modify-public user-read-private\",\n # \"show_dialog\": True\n }\n\n # Create query string from params\n # url_arg = \"&\".join([\"{}={}\".format(key, quote_params_val(val)) for\n # key, val in params.items()])\n url_arg = params_query_string(payload)\n\n # Request URL\n auth_url = endpoint + \"/?\" + url_arg\n #print \"AUTH_URL\", auth_url\n # User is redirected to Spotify where user is asked to authorize access to\n # his/her account within the scopes\n return redirect(auth_url)",
"def init_api():\n global soundcloud\n import json\n \n SECRETS_VERSION = 1\n \n # Load secrets file\n if os.path.exists(config.token_cache):\n with open(config.token_cache, 'r', encoding='utf-8') as f:\n secrets = json.load(f)\n else:\n secrets = {}\n \n # Try to reuse the cached access token\n if secrets\\\n and secrets['version'] == SECRETS_VERSION\\\n and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\\\n and secrets['username'] == config.username:\n \n soundcloud = Soundcloud(\n client_id=config.client_id,\n client_secret=config.client_secret,\n access_token=secrets['access_token']\n )\n return\n \n # Get a new access token\n logging.info('Getting a new access token') \n try:\n soundcloud = Soundcloud(\n client_id=config.client_id,\n client_secret=config.client_secret,\n username=config.username,\n password=config.password\n )\n except HTTPError as e:\n if e.response.status_code == 401:\n logging.critical('Incorrect API key, login or password. Please, edit config.py.')\n sys.exit(1)\n else:\n raise\n \n # Save the token\n secrets = {\n 'version': SECRETS_VERSION,\n 'username': config.username,\n 'access_token': soundcloud.access_token,\n 'access_token_acquired_at': time(),\n 'access_token_expires_in': soundcloud.token.expires_in,\n }\n \n with open(config.token_cache, 'w', encoding='utf-8') as f:\n secrets = json.dump(secrets, f, indent='\\t', ensure_ascii=False)",
"def check_spotify_credentials():\n\n\tclient_credentials_manager = SpotifyClientCredentials(client_id=os.environ.get(\"SPOTIFY_CID\"), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tclient_secret=os.environ.get(\"SPOTIFY_SECRET\"))\n\n\tsp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)\n\n\tif (os.environ.get(\"SPOTIFY_CID\") == None) | (os.environ.get(\"SPOTIFY_SECRET\") == None):\n\t\tlogger.warning(\"Spotify credentials not set up as environment variables\")\n\n\ttry:\n\t\tquery = sp.search(\"Rick+Astley+Never+Gonna+Give+You+Up\")\n\t\tartist = query['tracks']['items'][0]['artists'][0]['name']\n\t\tsong = query['tracks']['items'][0]['name']\n\n\t\tprint(\"I LOVE THE SONG {} BY {}\".format(song, artist))\n\n\t\tlogger.info(\"Spotify credentials enable API query\")\n\texcept:\n\t\tlogger.warning(\"Spotify credentials cannot query API\")",
"def __init__(self, client_access_token, artist_name):\n self.client_access_token = client_access_token\n self.artist_name = artist_name\n self.base_url = 'https://api.genius.com/'\n self.headers = {'Authorization': 'Bearer ' + self.client_access_token}\n self.artist_songs = None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
[creates a new playlist with given name, desc with given limits] | def create_new_playlist(self, name, desc=''):
pl_names, _, _ = self.list_playlists()
if name in pl_names:
self.logger.debug(
'Playlist Name Already Exists, please use another name')
else:
pl = self.sp.user_playlist_create(
self.user, name, public=False, description=desc)
self.sp.user_playlist_change_details(
self.user, pl['id'], collaborative=True) | [
"def create_playlist(self, playlist_name):\n print(\"create_playlist needs implementation\")",
"def create_spot_playlist(name, description=\"\"):\n \n auth_manager = create_spot_oauth()\n\n sp = Spotify(auth_manager=auth_manager)\n\n return sp.user_playlist_create(user=sp.current_user()[\"id\"], name=name, description=description)",
"def create(self, name: str) -> Optional[Playlist]:\n raise NotImplementedError",
"def new_playlist(name: str, items_and_track_nums: [[]]):\n p = Playlist(name=name)\n p.save()\n\n if items_and_track_nums:\n for [item, track_num] in items_and_track_nums:\n if item.__class__ == Song:\n new_playlisttosong(p, item, track_num)\n if item.__class__ == Album:\n new_playlisttoalbum(p, item, track_num)\n if item.__class__ == Playlist:\n new_playlisttoplaylist(p, item, track_num)\n\n p.save()\n return p",
"def new_playlist(\n project,\n name,\n episode=None,\n for_entity=\"shot\",\n for_client=False,\n client=default\n):\n project = normalize_model_parameter(project)\n data = {\n \"name\": name,\n \"project_id\": project[\"id\"],\n \"for_entity\": for_entity,\n \"for_client\": for_client\n }\n if episode is not None:\n episode = normalize_model_parameter(episode)\n data[\"episode_id\"] = episode[\"id\"]\n playlist = get_playlist_by_name(project, name, client=client)\n if playlist is None:\n playlist = raw.post(\"data/playlists/\", data, client=client)\n return playlist",
"def create_playlist(self) -> str:\r\n raise NotImplementedError",
"def create_playlist(self, data):\n pass",
"def create_playlist(self, playlist_name):\n msg = self._playlist.create_playlist(playlist_name)\n print(msg)",
"def create_playlist(self, playlist_name):\n \n # lower case the playlist name\n pl = playlist_name.lower()\n # create a whole new playlist\n if pl:\n print(\"Successfully created new playlist:\", playlist_name)\n # if the playlist is already existing\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")",
"def create_playlist(self, request):\n # TODO: Max amount of playlists at 20 for a user\n user = Account.find_by_id(request.userid)\n if user is None:\n print \"User not found\" \n return PlaylistResponse(errmsg=\"User ID not found\")\n new_pl = Playlist.add_new_playlist(user.key, request.name)\n return PlaylistResponse(pid=new_pl.key.id())",
"def playlist_create(self, user_id: str, name: str, public: bool = True,\n description: str = ''):\n payload = {\n 'name': name,\n 'public': public,\n 'description': description\n }\n return self._post(f'users/{user_id}/playlists', payload=payload)",
"def create_playlist(self, playlist_name):\n if playlist_name.lower() in self._playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n print(f\"Successfully created new playlist: {playlist_name}\")\n self._playlists[playlist_name.lower()] = Playlist(playlist_name)",
"def create_playlist(self, cur_user):\n \n if len(cur_user.playlists) > 4:\n print(\"Maximum number of playlists reached\")\n return\n\n playlist_name = input(\"Give your playlist a name: \")\n \n print(\"Here are your favorite genres: \" + str(cur_user.fav_genres))\n \n playlist = []\n \n for g in cur_user.fav_genres:\n genre_songs = self.catelog[self.catelog['genre'] == g]\n genre_songs = genre_songs[['genre', 'artist_name', 'track_name']]\n picks = genre_songs.sample(n=5)\n playlist.append(picks)\n \n playlist = pd.concat(playlist)\n \n print(\"This is your playlist!\")\n print(playlist)\n \n cur_user.playlists[playlist_name] = playlist",
"def create_playlist(user_id, sp, recommendations, name, description):\r\n \r\n # Get current user ID\r\n current_user = sp.current_user()\r\n current_user_id = current_user['id']\r\n \r\n # Get list of track ID's\r\n track_id_list = list(recommendations['id'].values)\r\n \r\n # Create Empty playlist\r\n sp.user_playlist_create(user = user_id, \r\n name = name, \r\n description = description)\r\n \r\n # Get playlist ID\r\n playlists = sp.current_user_playlists(limit=1)\r\n playlist_name = playlists['items'][0]['name']\r\n playlist_id = playlists['items'][0]['id']\r\n \r\n # Add tracks to playlist\r\n sp.user_playlist_add_tracks(user = current_user_id, \r\n playlist_id = playlist_id, \r\n tracks = track_id_list)\r\n \r\n # Check if playlist is succesfully created.\r\n if name == playlist_name:\r\n return '**Playlist was succesfully created on your Spotify account.**'\r\n else:\r\n return '**Playlist was not succesfully created.**'",
"def createPlaylists(N):\n for playlist_param in range(N):\n playlist = Playlist(\n name=getRandomName(),\n user=getRandomObject(get_user_model())\n )\n playlist.save()\n playlist.songs.set(getRandomObject(Song, randint(1, 10)))\n print(\"Created playlist:\", playlist)",
"def createPlaylist(youtube):\n playlists_insert_response = youtube.playlists().insert(\n part=\"snippet,status\",\n body=dict(\n snippet=dict(\n title=\"RedditSavedToYoutube\", #Playlist title\n description=\"Youtube videos found in your reddit saved posts.\" #Playlist description.\n ),\n status=dict(\n privacyStatus=\"private\" #Playlist privay settings.\n )\n )\n ).execute()\n\n #print(\"New playlist id: %s\" % playlists_insert_response[\"id\"])",
"def create_playlist(auth_header, spotify_user_id, playlist_name, activity_id):\n\n payload = { \n 'name' : playlist_name\n }\n\n USER_PLAYLIST_ENDPOINT = \"{}/{}/{}/{}\".format(SPOTIFY_API_URL, 'users', spotify_user_id, 'playlists')\n playlist_data = requests.post(USER_PLAYLIST_ENDPOINT, data=json.dumps(payload), headers=auth_header).json()\n playlist_uri = playlist_data['uri']\n user_id = session.get('logged_user')['username']\n\n new_playlist = Playlist(playlist_name = playlist_name,\n user_id = user_id,\n playlist_uri = playlist_uri, \n activity_id = activity_id)\n db.session.add(new_playlist)\n db.session.commit()\n\n playlist_id = new_playlist.playlist_id\n \n return playlist_id",
"def add_new_playlist(self, name, index=None):\n self._validate_name(name)\n sp_playlist = lib.sp_playlistcontainer_add_new_playlist(\n self._sp_playlistcontainer, utils.to_char(name)\n )\n if sp_playlist == ffi.NULL:\n raise spotify.Error(\"Playlist creation failed\")\n playlist = spotify.Playlist._cached(self._session, sp_playlist, add_ref=True)\n if index is not None:\n self.move_playlist(self.__len__() - 1, index)\n return playlist",
"def createspotifyplaylist(accesstoken, name, playlists, tracklist, userid):\n\n # find a unique name for the playlist\n playlistname = \"{} - flowed\".format(name)\n if playlistname in playlists:\n num = 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n while playlistname in playlists:\n num = num + 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n\n # create playlist\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"name\"] = playlistname\n\n url = \"https://api.spotify.com/v1/users/{}/playlists\".format(userid)\n\n r = requests.post(url, headers=headers, json=payload)\n\n response = r.json()\n\n\n if \"collaborative\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(url, headers=headers, json=payload)\n response = r.json()\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n elif \"collaborative\" in response:\n break\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n else: \n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n\n playlistid = response[\"id\"]\n playlisturl = response[\"external_urls\"][\"spotify\"]\n\n # add tracks to playlist\n while len(tracklist) > 100:\n\n # add first 100\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist[:100]\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print(\"error: problem adding songs to playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem adding songs to playlist\")\n print(\"no error response\")\n return(False)\n\n tracklist = tracklist[100:]\n\n if tracklist:\n\n # add the remainder of the tracks\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n response = r.json()\n if \"snapshot_id\" in response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist 
request failed\")\n print(\"no error response\")\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n\n return(playlistname, playlisturl)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method which calculates TS Percentage metric for a player | def set_ts_percentage(self):
bx = self.get_standard_stats()
ptos = float(bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"])
tcInt = float(bx["t2p_int"] + bx["t3p_int"])
tsAttempts = float(tcInt + (0.44*float(bx["tl_int"])))
result = 0.00
if tsAttempts > 0.00:
result = (ptos/(2*tsAttempts))*100
self.ts_percentage = "%.2f" % round(result, 2) | [
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def percent_score(self):\n return self.score * 100",
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def percentage_update(self):\n\n self.event_update()\n return self.percentage",
"def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)",
"def _getPercentage(self):\n\t\treturn float(self.value)/float(self.maxvalue-self.minvalue)",
"def percent_difference_time(self, old, new):\n diff = old - new\n avg = (old + new) / 2\n perc = (diff / avg) * 100\n return perc",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def calc_percentage(self, total_entries):\n self.percentage = self.count/total_entries * 100",
"def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def materialPercentage(material,player):\n whitescore,blackscore = material\n if player == 0:\n return (whitescore/(blackscore+whitescore))\n else:\n return (blackscore/(blackscore+whitescore))",
"def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)",
"def win_pct(self):\n wins = self.wins_money\n losses = self.losses_money\n try:\n return '{0:.0f} percent'.format((wins / (wins + losses)) * 100)\n except ZeroDivisionError:\n return \"N/A\"",
"def ppp_score_percentage(self):\r\n objects = self.__get_objects()\r\n z1 = str(objects[1]).strip().split()\r\n z2 = float(z1[14]) # will be the floating point number 8.3\r\n db = binom.rvs(n=10, p=z2, size=10000)\r\n a = np.array(db)\r\n b = np.mean(a)*100/10\r\n return b",
"def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
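The record computes true shooting percentage as points divided by twice the true-shooting attempts (field-goal attempts plus 0.44 of the free-throw attempts). A minimal sketch of the same arithmetic with plain arguments instead of the record's stats dictionary (function name is illustrative):

def true_shooting_pct(points, fga, fta):
    # TS% = 100 * PTS / (2 * (FGA + 0.44 * FTA))
    attempts = fga + 0.44 * fta
    return round(100 * points / (2 * attempts), 2) if attempts > 0 else 0.0

# Example: 25 points on 15 field-goal attempts and 6 free-throw attempts
print(true_shooting_pct(25, 15, 6))  # 70.86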
Method which calculates USG% for each player of each team | def set_usg_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
tcInt = bx["t2p_int"] + bx["t3p_int"]
a = tcInt + (Decimal('0.44')*bx["tl_int"]) + bx["turnovers"]
b = team["minutes"]/5
c = (team["t2p_int"] + team["t3p_int"]) + (Decimal('0.44')*team["tl_int"]) + team["turnovers"]
result = 0.00
if bx["minutes"] > 0:
result = ((Decimal(a)*Decimal(b))/(bx["minutes"]*c))*100
self.usg_percentage = "%.2f" % round(result, 2) | [
"def opp_strength(team):\n opponent_count = 0\n opponent_wins = 0\n\n gov_rounds = team.gov_team\n opp_rounds = team.opp_team\n\n for round_obj in gov_rounds.all():\n opponent_wins += tot_wins(round_obj.opp_team)\n opponent_count += 1\n for round_obj in opp_rounds.all():\n opponent_wins += tot_wins(round_obj.gov_team)\n opponent_count += 1\n\n if opponent_count > 0:\n return float(opponent_wins) / float(opponent_count)\n else:\n return 0.0",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def calculate_uk_percentage_gross(): \n \n #get movies from db\n movies_df = get_movies_df()\n \n #calculate return percentage based on worldwide gross and uk gross\n movies_df[\"worldwide_norm\"] = movies_df[\"worldwide_gross_usd\"].replace('[\\£,]', '', regex=True).astype(float)\n movies_df[\"uk_takings_norm\"] = movies_df[\"uk_gross_usd\"].replace('[\\£,]', '', regex=True).astype(float)\n movies_df[\"uk_percentage\"] = (movies_df[\"uk_takings_norm\"] / movies_df[\"worldwide_norm\"]) * 100\n \n #update the db\n for index, row in movies_df.iterrows(): \n updates = { \"uk_percentage\" : row[\"uk_percentage\"] }\n selects = {\"movieId\" : row[\"movieId\"]}\n database_helper.update_data(\"movies\", update_params = updates, select_params = selects)\n \n #return list of movies\n return movies_df",
"def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)",
"def simulate(team, N=100):\n\n total_score = 0.0\n for player in team:\n simulation_score = []\n for i in range(N):\n simulation_score.append(get_player_score(player))\n total_score += np.mean(simulation_score)\n\n return total_score",
"def get_team_results(usrs, sched):\t\n\t\n\ttotal_consistency = 0\n\ttotal_completion = 0\n\tfor user in usrs:\n\t\tresult = get_consistency(user, sched)\n\t\t\n\t\ttotal_consistency += result[\"consistency\"]\n\t\ttotal_completion += result[\"completion\"]\n\t\n\tteam_consistency = 0\n\tteam_completion = 0\n\t\t\n\tif(len(usrs) != 0):\n\t\tteam_consistency = total_consistency / float(len(usrs))\n\t\tteam_completion = total_completion / float(len(usrs))\n\t\t\n\treturn { \"consistency\" : team_consistency, \"completion\" : team_completion }",
"def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def opponent_game_win_percentage(self, standings=STANDINGS):\n tot = 0\n n = 0\n for opp in self.get_opponents(standings):\n n += 1\n tot += opp.game_win_percentage()\n return tot / (n or -1)",
"def calculate_stats(grouped_scores):\n overall_stats = []\n\n for team, scores in grouped_scores:\n team_stats = [team.ljust(30)] # Team Name\n team_stats.extend(\n (\n len(scores), # Matches Played\n scores.count(3), # Wins\n scores.count(1), # Draws\n scores.count(0), # Losses\n sum(scores), # Points\n )\n )\n overall_stats.append(team_stats)\n\n return overall_stats",
"def opponent_match_win_percentage(self, standings=STANDINGS):\n tot = 0\n n = 0\n for opp in self.get_opponents(standings):\n n += 1\n tot += opp.match_win_percentage()\n return tot / (n or -1)",
"def average_stats(self, count_matches, stats):\n\n for steam in stats:\n for stat in stats[steam]:\n if stat == sC.NAME_SMALL or stat == sC.NICK_SMALL:\n continue\n\n team, _, _ = self.get_details_by_id(steam)\n stats[steam][stat] = round(stats[steam][stat] / count_matches[self.remove_spl_chars(team)], 1)",
"def total_strength(self, teams):\n total_strength = 0\n for team in teams:\n total_strength += team.strength\n return total_strength",
"def materialPercentage(material,player):\n whitescore,blackscore = material\n if player == 0:\n return (whitescore/(blackscore+whitescore))\n else:\n return (blackscore/(blackscore+whitescore))",
"def main(simulations, userschoice):\n # The teams data are obtained from FIFA statistics\n # Team Name, Attack, Defence\n quarters = ['quarter1', 'quarter2', 'quarter3', 'quarter4', 'quarter5', 'quarter6', 'quarter7', 'quarter8']\n semifinalists = ['semifinalist1', 'semifinalist2', 'semifinalist3', 'semifinalist4']\n finalists = ['finalist1', 'finalist2']\n\n df = pd.read_csv('FifaRankings.csv', index_col=\"Ranking\")\n a_set = set()\n while True:\n a_set.add(randint(42, 85))\n if len(a_set) == 32:\n break\n lst1 = sorted(list(a_set), reverse=True)\n\n a_set = set()\n while True:\n a_set.add(randint(38, 83))\n if len(a_set) == 32:\n break\n lst2 = sorted(list(a_set), reverse=True)\n print(\"\\n\")\n df['Attack'] = lst1\n df['Defence'] = lst2\n a = list(df[\"Team\"])\n\n avgScored = 0\n avgConceded = 0\n avgScored = df['Attack'].sum()\n avgConceded = df['Defence'].sum()\n\n avgScored = avgScored / len(df)\n avgConceded = avgConceded / len(df)\n print(\"\\n\")\n avgattack = []\n avgdefense = []\n\n for i in range(1, 33):\n if df['Matches Played'][i] != 0:\n win_rate = (df['WorldCup Wins'][i] / df['Matches Played'][i])\n else:\n win_rate = 0\n avgattack.append((df['Attack'][i] / avgScored) + win_rate)\n avgdefense.append((df['Defence'][i] / avgConceded) + win_rate)\n\n df['Avg Attack'] = avgattack\n df['Avg Defense'] = avgdefense\n\n\n teamstats=[]\n for i in range(1,len(df)+1):\n teaminfo=[]\n teaminfo = (df[\"Team\"][i], df['Avg Attack'][i], df['Avg Defense'][i])\n teaminfo=list(teaminfo)\n teamstats.append(teaminfo)\n\n germany = WorldCupTeam(\"GERMANY\", teamstats)\n brazil = WorldCupTeam(\"BRAZIL\", teamstats)\n belgium = WorldCupTeam(\"BELGIUM\", teamstats)\n portugal = WorldCupTeam(\"PORTUGAL\", teamstats)\n argentina = WorldCupTeam(\"ARGENTINA\", teamstats)\n france = WorldCupTeam(\"FRANCE\", teamstats)\n switzerland = WorldCupTeam(\"SWITZERLAND\", teamstats)\n spain = WorldCupTeam(\"SPAIN\", teamstats)\n russia = WorldCupTeam(\"RUSSIA\", teamstats)\n japan = WorldCupTeam(\"JAPAN\", teamstats)\n polland=WorldCupTeam(\"POLLAND\", teamstats)\n korea_republic = WorldCupTeam(\"KOREA REPUBLIC\", teamstats)\n england = WorldCupTeam(\"ENGLAND\", teamstats)\n denmark= WorldCupTeam(\"DENMARK\", teamstats)\n peru= WorldCupTeam(\"PERU\", teamstats)\n tunisia=WorldCupTeam(\"TUNISIA\", teamstats)\n mexico = WorldCupTeam(\"MEXICO\", teamstats)\n colombia = WorldCupTeam(\"COLOMBIA\", teamstats)\n uruguay = WorldCupTeam(\"URUGUAY\", teamstats)\n croatia = WorldCupTeam(\"CROATIA\", teamstats)\n australia = WorldCupTeam(\"AUSTRALIA\", teamstats)\n iceland=WorldCupTeam(\"ICELAND\", teamstats)\n sweden=WorldCupTeam(\"SWEDEN\", teamstats)\n costa_rica = WorldCupTeam(\"COSTA RICA\", teamstats)\n senegal=WorldCupTeam(\"SENEGAL\", teamstats)\n serbia=WorldCupTeam(\"SERBIA\", teamstats)\n morrocco=WorldCupTeam(\"MORROCCO\", teamstats)\n egypt=WorldCupTeam(\"EGYPT\", teamstats)\n nigeria = WorldCupTeam(\"NIGERIA\", teamstats)\n saudi_arabia=WorldCupTeam(\"SAUDI ARABIA\", teamstats)\n panama=WorldCupTeam(\"PANAMA\", teamstats)\n iran = WorldCupTeam(\"IRAN\", teamstats)\n\n\n #INPUT USERS CHOICE FOR FIXED CHOICE\n choices= [\"random\", \"Random\", \"RANDOM\"]\n choicess = [\"fixed\", \"Fixed\", \"FIXED\"]\n if userschoice in choices:\n countries = [germany, brazil, belgium, portugal, argentina, france, switzerland, spain, russia, japan, polland,\n korea_republic, england, denmark, peru, tunisia, mexico, colombia, uruguay, croatia, australia,\n iceland, sweden, costa_rica, senegal, serbia, morrocco, egypt, 
nigeria, saudi_arabia, panama, iran]\n finalresults = {}\n\n GroupA, GroupB, GroupC, GroupD, GroupE, GroupF, GroupG, GroupH = ([] for i in range(8))\n\n Groups = [GroupA, GroupB, GroupC, GroupD, GroupE, GroupF, GroupG, GroupH]\n for i in Groups:\n for j in range(4):\n teamname = choice(countries)\n i.append(teamname)\n countries.remove(teamname)\n\n print(\"DRAWS for the WorldCup 2018 are:\")\n print(\"\\n\")\n for i in range(simulations):\n # Play first stage\n print(\"Result of\", i + 1, \"simulations\")\n print(\"--------------------------------------------\")\n print(\"This is GROUP STAGE\")\n print(\"\\n\")\n print(\"GROUP A RESULTS\")\n print(\"\\n\")\n groupA = TeamPool(Groups[0])\n print(\"\\n\")\n print(\"GROUP B RESULTS\")\n print(\"\\n\")\n groupB = TeamPool(Groups[1])\n print(\"\\n\")\n print(\"GROUP C RESULTS\")\n print(\"\\n\")\n groupC = TeamPool(Groups[2])\n print(\"\\n\")\n print(\"GROUP D RESULTS\")\n print(\"\\n\")\n groupD = TeamPool(Groups[3])\n print(\"\\n\")\n print(\"GROUP E RESULTS\")\n print(\"\\n\")\n groupE = TeamPool(Groups[4])\n print(\"\\n\")\n print(\"GROUP F RESULTS\")\n print(\"\\n\")\n groupF = TeamPool(Groups[5])\n print(\"\\n\")\n print(\"GROUP G RESULTS\")\n print(\"\\n\")\n groupG = TeamPool(Groups[6])\n print(\"\\n\")\n print(\"GROUP H RESULTS\")\n print(\"\\n\")\n groupH = TeamPool(Groups[7])\n\n # Play second stage\n print(\"\\n\")\n print(\"ROUND OF 16\")\n print(\"\\n\")\n r16 = [groupA.first_qualified, groupA.second_qualified, groupB.first_qualified, groupB.second_qualified,\n groupC.first_qualified, groupC.second_qualified, groupD.first_qualified, groupD.second_qualified,\n groupE.first_qualified, groupE.second_qualified, groupF.first_qualified, groupF.second_qualified,\n groupG.first_qualified, groupG.second_qualified, groupH.first_qualified, groupH.second_qualified]\n\n\n GroupP, GroupQ, GroupR, GroupS, GroupT, GroupU, GroupV, GroupW =([] for i in range(8))\n\n round16groups = [GroupP, GroupQ, GroupR, GroupS, GroupT, GroupU, GroupV, GroupW]\n\n for k in round16groups:\n for j in range(2):\n teamname = choice(r16)\n k.append(teamname)\n r16.remove(teamname)\n\n for i in range(8):\n quarters[i]=WorldCupMatch(round16groups[i][0], round16groups[i][1], False).winner\n\n # Quarters\n print(\"\\n\")\n print(\"QUARTER - FINALS\")\n print(\"\\n\")\n quarterfinal = [quarters[0], quarters[1], quarters[2], quarters[3], quarters[4], quarters[5], quarters[6],\n quarters[7]]\n GroupA1, GroupB1, GroupC1, GroupD1 = ([] for i in range(4))\n\n quarterfinalgroups = [GroupA1, GroupB1, GroupC1, GroupD1]\n\n i = 0\n for i in quarterfinalgroups:\n for j in range(2):\n teamname = choice(quarterfinal)\n i.append(teamname)\n quarterfinal.remove(teamname)\n\n for i in range(4):\n semifinalists[i] = WorldCupMatch(quarterfinalgroups[i][0], quarterfinalgroups[i][1], False).winner\n\n # Semifinals\n print(\"\\n\")\n print(\"SEMI - FINALS\")\n print(\"\\n\")\n\n semifinal = [semifinalists[0], semifinalists[1], semifinalists[2], semifinalists[3]]\n GroupP1, GroupQ1 = ([] for i in range(2))\n semifinalgroups = [GroupP1, GroupQ1]\n\n i = 0\n for i in semifinalgroups:\n for j in range(2):\n teamname = choice(semifinal)\n i.append(teamname)\n semifinal.remove(teamname)\n\n for i in range(2):\n finalists[i] = WorldCupMatch(semifinalgroups[i][0], semifinalgroups[i][1], False).winner\n # Finals\n print(\"\\n\")\n print(\"WORLD-CUP FINAL\")\n print(\"\\n\")\n winner = WorldCupMatch(finalists[0], finalists[1], False).winner\n print(\"\\n\")\n\n if winner.name in finalresults:\n 
finalresults[winner.name] += 1\n else:\n finalresults[winner.name] = 1\n\n for key in sorted(finalresults, key=finalresults.get, reverse=True):\n print(key + \": \" + str(finalresults[key] / simulations))\n ro=(finalresults[key] / simulations) * 100\n print(str(ro) + \"% chance of winning the worldcup\")\n print(\"\\n\")\n print(\"\\n\")\n\n\n elif userschoice in choicess:\n\n print(\"\\n\")\n finalresults = {}\n groupA1 = [russia , saudi_arabia,egypt, uruguay]\n groupB1 = [portugal, spain, morrocco, iran]\n groupC1 = [france, australia, peru, denmark]\n groupD1 = [argentina, iceland, croatia, nigeria]\n groupE1 = [brazil, switzerland, costa_rica, serbia]\n groupF1 = [germany, mexico, sweden, korea_republic]\n groupG1 = [belgium, panama, tunisia, england]\n groupH1 = [polland, senegal, colombia, japan]\n print(\"\\n\")\n for i in range(simulations):\n # Play first stage\n print(\"Result of\", i+1 ,\"simulations\")\n print(\"--------------------------------------------\")\n print(\"This is GROUP STAGE\")\n print(\"\\n\")\n print(\"GROUP A RESULTS\")\n print(\"\\n\")\n groupA = TeamPool(groupA1)\n print(\"\\n\")\n print(\"GROUP B RESULTS\")\n print(\"\\n\")\n groupB = TeamPool(groupB1)\n print(\"\\n\")\n print(\"GROUP C RESULTS\")\n print(\"\\n\")\n groupC = TeamPool(groupC1)\n print(\"\\n\")\n print(\"GROUP D RESULTS\")\n print(\"\\n\")\n groupD = TeamPool(groupD1)\n print(\"\\n\")\n print(\"GROUP E RESULTS\")\n print(\"\\n\")\n groupE = TeamPool(groupE1)\n print(\"\\n\")\n print(\"GROUP F RESULTS\")\n print(\"\\n\")\n groupF = TeamPool(groupF1)\n print(\"\\n\")\n print(\"GROUP G RESULTS\")\n print(\"\\n\")\n groupG = TeamPool(groupG1)\n print(\"\\n\")\n print(\"GROUP H RESULTS\")\n print(\"\\n\")\n groupH = TeamPool(groupH1)\n print(\"Qualifies teams:\", groupH.first_qualified.name)\n\n # Play second stage\n print(\"\\n\")\n print(\"ROUND OF 16\")\n print(\"\\n\")\n\n quarter1 = WorldCupMatch(groupA.first_qualified, groupA.second_qualified, False).winner\n quarter2 = WorldCupMatch(groupB.first_qualified, groupB.second_qualified, False).winner\n quarter3 = WorldCupMatch(groupC.first_qualified, groupC.second_qualified, False).winner\n quarter4 = WorldCupMatch(groupD.first_qualified, groupD.second_qualified, False).winner\n quarter5 = WorldCupMatch(groupE.first_qualified, groupE.second_qualified, False).winner\n quarter6 = WorldCupMatch(groupF.first_qualified, groupF.second_qualified, False).winner\n quarter7 = WorldCupMatch(groupG.first_qualified, groupG.second_qualified, False).winner\n quarter8 = WorldCupMatch(groupH.first_qualified, groupH.second_qualified, False).winner\n\n # Quarters\n print(\"\\n\")\n print(\"QUARTER - FINALS\")\n print(\"\\n\")\n\n semifinalist1 = WorldCupMatch(quarter1, quarter2, False).winner\n semifinalist2 = WorldCupMatch(quarter3, quarter4, False).winner\n semifinalist3 = WorldCupMatch(quarter5, quarter6, False).winner\n semifinalist4 = WorldCupMatch( quarter7, quarter8, False).winner\n\n # Semifinals\n print(\"\\n\")\n print(\"SEMI - FINALS\")\n print(\"\\n\")\n finalist1 = WorldCupMatch(semifinalist1, semifinalist2, False).winner\n finalist2 = WorldCupMatch(semifinalist3, semifinalist4, False).winner\n\n # Final\n print(\"\\n\")\n print(\"WORLD-CUP FINAL\")\n print(\"\\n\")\n winner = WorldCupMatch(finalist1, finalist2, False).winner\n print(\"\\n\")\n\n\n if winner.name in finalresults:\n finalresults[winner.name] += 1\n else:\n finalresults[winner.name] = 1\n\n for key in sorted(finalresults, key=finalresults.get, reverse=True):\n print(key + \": \" + 
str(finalresults[key] / simulations))\n rou = (finalresults[key] / simulations) * 100\n print(str(rou) + \"% chance of winning the worldcup\")\n print(\"\\n\")\n print(\"\\n\")\n else:\n print(\"Please enter correct input and try again\")\n pass",
"def tot_wins(team):\n normal_wins = 0\n for round_obj in team.opp_team.all():\n if round_obj.victor == Round.OPP:\n normal_wins += 1\n for round_obj in team.gov_team.all():\n if round_obj.victor == Round.GOV:\n normal_wins += 1\n return normal_wins + num_byes(team) + num_forfeit_wins(team)",
"def stats(self):\n for hero in self.heroes:\n if hero.deaths == 0:\n ratio = hero.kills\n else:\n ratio = (hero.kills / hero.deaths) * 100\n print(hero.name + \"'s kill/death radio:\", ratio)",
"def win_pct_ev(ex_ch=False, use_theoretical_rates=False, thresh=SAMPLE_THRESH, df=None):\n if df is None:\n df = fetch_game_log(parse_cols=True)\n \n d_keys = list(product(SIZES, [GOOD, BAD]))\n player_size_team_cnts = defaultdict(lambda: dict(zip(d_keys, [0] * len(d_keys)))) # dict[str, dict[(int, str), int]]\n player_game_cnts = defaultdict(int)\n for idx, row in df.iterrows():\n if ex_ch and row[CHEESY_WIN] == \"Yes\":\n continue\n num_players = row[NUM_PLAYERS]\n for role in ROLES:\n player = row[role]\n if player in [NA, UNK]:\n continue\n player_game_cnts[player] += 1\n if role in BADS:\n player_size_team_cnts[player][(num_players, BAD)] += 1\n else:\n player_size_team_cnts[player][(num_players, GOOD)] += 1\n \n if use_theoretical_rates:\n good_win_rates = GOOD_WIN_RATES_BALANCE\n else:\n good_win_rates = good_win_rates_n_players(ex_ch=ex_ch, df=df)[[\"# Players\", \"Good Win %\"]] \\\n .set_index(\"# Players\") \\\n .to_dict()[\"Good Win %\"]\n \n player_size_good_pcts = {}\n player_size_bad_pcts = {}\n for player in player_size_team_cnts:\n player_size_good_pcts[player] = defaultdict(\n int,\n {\n key[0]: player_size_team_cnts[player][key] / player_game_cnts[player]\n for key in player_size_team_cnts[player] if GOOD in key\n }\n )\n player_size_bad_pcts[player] = defaultdict(\n int,\n {\n key[0]: player_size_team_cnts[player][key] / player_game_cnts[player]\n for key in player_size_team_cnts[player] if BAD in key\n }\n )\n \n player_win_pct_evs = {}\n for player in player_size_good_pcts:\n if player_game_cnts[player] < thresh:\n continue\n ev = 0\n for size in player_size_good_pcts[player]:\n if size not in good_win_rates:\n continue\n ev += player_size_good_pcts[player][size] * good_win_rates[size] + player_size_bad_pcts[player][size] * (1 - good_win_rates[size])\n \n player_win_pct_evs[player] = ev\n \n return pd.DataFrame(player_win_pct_evs.items(), columns=[\"Player\", \"Expected Win %\"]).sort_values(\"Expected Win %\", ascending=False)",
"def win_percentage(self, return_on_div_by_zero='n/a'):\n if self.battle_count() > 0:\n return 100.0 * float(self.win_count()) / float(self.battle_count())\n else:\n return return_on_div_by_zero"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
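The usage calculation above estimates the share of team possessions a player uses while on the floor, where a possession ends with a field-goal attempt, 0.44 of a free-throw attempt, or a turnover. A simplified sketch of the same formula without the Decimal arithmetic used in the record (names are illustrative):

def usage_pct(fga, fta, tov, minutes, team_fga, team_fta, team_tov, team_minutes):
    # USG% = 100 * ((FGA + 0.44*FTA + TOV) * (team minutes / 5))
    #        / (player minutes * (team FGA + 0.44*team FTA + team TOV))
    if minutes <= 0:
        return 0.0
    player_poss = fga + 0.44 * fta + tov
    team_poss = team_fga + 0.44 * team_fta + team_tov
    return round(100 * (player_poss * (team_minutes / 5)) / (minutes * team_poss), 2)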
Method which calculates Total Rebound Percentage | def set_total_reb_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
player_rebounds = bx["reb_def"] + bx["reb_of"]
team_rebounds = team["reb_def"] + team["reb_of"]
opp_team_rebounds = opp_team["reb_def"] + opp_team["reb_of"]
result = 0.00
try:
if bx["minutes"] > 0:
result = ((player_rebounds * (team["minutes"]/5)) / (bx["minutes"] * (team_rebounds + opp_team_rebounds)))*100
except ZeroDivisionError:
print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC)
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
self.total_reb_percentage = "%.2f" % round(result, 2) | [
"def calc_percentages(self):\n self.beyond_lower.calc_percentage(self.total_entries)\n for b in self.buckets:\n b.calc_percentage(self.total_entries)\n self.beyond_upper.calc_percentage(self.total_entries)",
"def total_rebounds_percentage(self, total_rebounds_percentage):\n\n self._total_rebounds_percentage = total_rebounds_percentage",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def calc_percentage(self, total_entries):\n self.percentage = self.count/total_entries * 100",
"def _getPercentage(self):\n\t\treturn float(self.value)/float(self.maxvalue-self.minvalue)",
"def set_total_reb_def_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_def\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_def\"] + opp_team[\"reb_of\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_def_percentage = \"%.2f\" % round(result, 2)",
"def pct_of(pct, total):\n return pct * 0.01 * total",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def ppp_score_percentage(self):\r\n objects = self.__get_objects()\r\n z1 = str(objects[1]).strip().split()\r\n z2 = float(z1[14]) # will be the floating point number 8.3\r\n db = binom.rvs(n=10, p=z2, size=10000)\r\n a = np.array(db)\r\n b = np.mean(a)*100/10\r\n return b",
"def calculate_percentage(val, total):\n percent = np.divide(val, total)\n\n return percent",
"def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)",
"def calculate_waste_percentage():\n re = 0\n for aa in range(0, total_number_of_products):\n for bb in range(0, total_number_of_steps):\n re += rework[aa][bb]\n print(re)\n waste_p = re/(total_number_of_steps*total_number_of_products)\n return waste_p",
"def calculate_percent(value, total):\n return int(value / total * 100)",
"def update_percent(self):",
"def _calculate_final_percent(revenue, salary_group):\n\n percent = 0\n\n if salary_group == 1:\n percent = 0.5\n\n if revenue > 10000:\n percent = 0.6\n elif revenue > 5000:\n percent = 0.55\n elif salary_group == 2:\n percent = 0.3\n\n if revenue > 20000:\n percent = 0.4\n elif revenue > 10000:\n percent = 0.35\n\n return percent",
"def cal_total_grade(x):\r\n #Sum of all provided grade as all hold equal weight (25%) of the final grade\r\n total_grade = sum(x[2:])\r\n return total_grade",
"def total_rebounds_per_poss(self):\n return self._total_rebounds_per_poss",
"def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True",
"def patrimony_total(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
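Total rebound percentage above is the share of available rebounds (both teams combined) a player grabs while on the floor. The same arithmetic as a standalone sketch (function and argument names are illustrative):

def total_rebound_pct(player_reb, minutes, team_minutes, team_reb, opp_reb):
    # TRB% = 100 * (player rebounds * (team minutes / 5))
    #        / (player minutes * (team rebounds + opponent rebounds))
    available = team_reb + opp_reb
    if minutes <= 0 or available == 0:
        return 0.0
    return round(100 * (player_reb * (team_minutes / 5)) / (minutes * available), 2)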
Method which calculates Total Defensive Rebound Percentage | def set_total_reb_def_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
result = 0.00
try:
if bx["minutes"] > 0:
result = ((bx["reb_def"] * (team["minutes"]/5)) / (bx["minutes"] * (team["reb_def"] + opp_team["reb_of"])))*100
except ZeroDivisionError:
print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC)
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
self.total_reb_def_percentage = "%.2f" % round(result, 2) | [
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)",
"def calc_percentages(self):\n self.beyond_lower.calc_percentage(self.total_entries)\n for b in self.buckets:\n b.calc_percentage(self.total_entries)\n self.beyond_upper.calc_percentage(self.total_entries)",
"def defensive_rebounds_percentage(self, defensive_rebounds_percentage):\n\n self._defensive_rebounds_percentage = defensive_rebounds_percentage",
"def percent_remaining(self):\n return self.remaining_demand / self.requested_energy",
"def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)",
"def _getPercentage(self):\n\t\treturn float(self.value)/float(self.maxvalue-self.minvalue)",
"def calculate_waste_percentage():\n re = 0\n for aa in range(0, total_number_of_products):\n for bb in range(0, total_number_of_steps):\n re += rework[aa][bb]\n print(re)\n waste_p = re/(total_number_of_steps*total_number_of_products)\n return waste_p",
"def get_free_set_percentage(self, params):\n raise NotImplementedError()",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def total_rebounds_percentage(self, total_rebounds_percentage):\n\n self._total_rebounds_percentage = total_rebounds_percentage",
"def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True",
"def calc_percentage(self, total_entries):\n self.percentage = self.count/total_entries * 100",
"def pct_of(pct, total):\n return pct * 0.01 * total",
"def _calculate_total_bonus(self):\n if self.estimated_income < self.target_income:\n return 0\n else:\n pass",
"def defensive_rebounds_per_poss(self):\n return self._defensive_rebounds_per_poss",
"def offensive_rebounds_percentage(self, offensive_rebounds_percentage):\n\n self._offensive_rebounds_percentage = offensive_rebounds_percentage",
"def _total_fullness(self) -> float:\n # TODO: Complete this method.\n pass",
"def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
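Defensive rebound percentage above follows the same shape, but the pool of available boards is the team's defensive rebounds plus the opponent's offensive rebounds. A standalone sketch (names are illustrative):

def def_rebound_pct(player_dreb, minutes, team_minutes, team_dreb, opp_oreb):
    # DRB% = 100 * (player def. rebounds * (team minutes / 5))
    #        / (player minutes * (team def. rebounds + opponent off. rebounds))
    available = team_dreb + opp_oreb
    if minutes <= 0 or available == 0:
        return 0.0
    return round(100 * (player_dreb * (team_minutes / 5)) / (minutes * available), 2)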
Method which calculates Steals Percentage of a player | def set_steals_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
poss = self.get_team_possessions()
result = 0.00
if bx["minutes"] > 0:
result = ((bx["steals"] * (team["minutes"]/Decimal('5'))) / Decimal(float(bx["minutes"]) * poss)) * 100
self.steals_percentage = "%.2f" % round(result, 2) | [
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def win_percentage(self, return_on_div_by_zero='n/a'):\n if self.battle_count() > 0:\n return 100.0 * float(self.win_count()) / float(self.battle_count())\n else:\n return return_on_div_by_zero",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def materialPercentage(material,player):\n whitescore,blackscore = material\n if player == 0:\n return (whitescore/(blackscore+whitescore))\n else:\n return (blackscore/(blackscore+whitescore))",
"def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)",
"def percentage_step(self) -> float:\n return 1",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def loss_percentage(self, return_on_div_by_zero='n/a'):\n if self.battle_count() > 0:\n return 100.0 * float(self.loss_count()) / float(self.battle_count())\n else:\n return return_on_div_by_zero",
"def get_winrate(self, player: int) -> float:\r\n return self.__data[player, 5]",
"def win_pct(self):\n wins = self.wins_money\n losses = self.losses_money\n try:\n return '{0:.0f} percent'.format((wins / (wins + losses)) * 100)\n except ZeroDivisionError:\n return \"N/A\"",
"def percentage_update(self):\n\n self.event_update()\n return self.percentage",
"def percent_score(self):\n return self.score * 100",
"def water_percentage(self):\n water = 1.00\n for ingredient in self.ingredients:\n water -= ingredient.ratio\n return round(water, 2)",
"def pctStartingHand(self, round_=None):\n\t\tif round_ is None:\n\t\t\treturn self.pStartingHand() * 100\n\t\telse:\n\t\t\treturn round(self.pStartingHand() * 100, round_)",
"def set_ts_percentage(self):\n bx = self.get_standard_stats()\n ptos = float(bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tsAttempts = float(tcInt + (0.44*float(bx[\"tl_int\"])))\n result = 0.00\n if tsAttempts > 0.00:\n result = (ptos/(2*tsAttempts))*100\n self.ts_percentage = \"%.2f\" % round(result, 2)",
"def per_hour(self):\n if self.is_salary():\n return 0.0\n return self.wage_cents / 100.0",
"def percentage(self) -> float:\n if self.num_total() == 0:\n raise RuntimeError(\n \"Yield::percentage is undefined when Yield::num_total() == 0\"\n )\n\n return float(self.num_success_) / float(self.num_total())",
"def opp_strength(team):\n opponent_count = 0\n opponent_wins = 0\n\n gov_rounds = team.gov_team\n opp_rounds = team.opp_team\n\n for round_obj in gov_rounds.all():\n opponent_wins += tot_wins(round_obj.opp_team)\n opponent_count += 1\n for round_obj in opp_rounds.all():\n opponent_wins += tot_wins(round_obj.gov_team)\n opponent_count += 1\n\n if opponent_count > 0:\n return float(opponent_wins) / float(opponent_count)\n else:\n return 0.0",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
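Steal percentage above scales a player's steals by playing time against the possession estimate returned by get_team_possessions. A sketch with the possession count passed in directly (names are illustrative):

def steal_pct(steals, minutes, team_minutes, possessions):
    # STL% = 100 * (steals * (team minutes / 5)) / (player minutes * possessions)
    if minutes <= 0 or possessions <= 0:
        return 0.0
    return round(100 * (steals * (team_minutes / 5)) / (minutes * possessions), 2)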
Method which calculates Assists Percentage of a player | def set_assists_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
team_tc_conv = team["t2p_conv"] + team["t3p_conv"]
player_tc_conv = bx["t2p_conv"] + bx["t3p_conv"]
result = 0.00
try:
if bx["minutes"] > 0:
result = (bx["assists"] / (((bx["minutes"] / (team["minutes"] / 5)) * team_tc_conv) - player_tc_conv))*100
result = result if result <= 100 and result >= 0 else 0
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
except InvalidOperation:
print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC)
self.assists_percentage = "%.2f" % round(result, 2) | [
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def percent_score(self):\n return self.score * 100",
"def calc_percentage(self, total_entries):\n self.percentage = self.count/total_entries * 100",
"def win_percentage(self, return_on_div_by_zero='n/a'):\n if self.battle_count() > 0:\n return 100.0 * float(self.win_count()) / float(self.battle_count())\n else:\n return return_on_div_by_zero",
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def get_opinion_percent(self):\n return (self.get_percent()+100)/2",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def materialPercentage(material,player):\n whitescore,blackscore = material\n if player == 0:\n return (whitescore/(blackscore+whitescore))\n else:\n return (blackscore/(blackscore+whitescore))",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def getHealthPercentage(self):\r\n return (self.pokemon.getCurrHP()*100)/self.pokemon.getStat(\"HP\")",
"def calc_assist_score(assists):\n return (((assists * ASSISTS_MULT) + ASSISTS_MOD) * assists) / ASSISTS_DIV",
"def get_player_health_percentage(self):\n self.set_player_health_percentage()\n return self.playerHealthPercentage",
"def percent():\n\tpercentage=(int(marks)/int(total))*100\n\tprint(\"\\nYour percentage is\",percentage)",
"def calculate_progress(self):\n stakeholders_count = self.meetingstakeholder_set.count()\n meeting_items_count = self.meetingitem_set.count()\n factors_count = self.factors.count()\n\n max_evaluations = stakeholders_count * meeting_items_count * factors_count\n total_evaluations = self.get_evaluations().count()\n\n if max_evaluations != 0:\n percentage = round((total_evaluations / float(max_evaluations)) * 100.0, 2)\n else:\n percentage = 0.0\n\n self.progress = percentage\n self.save()\n return self.progress",
"def set_assists_ratio(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n denominador = tcInt + (0.44 * float(bx[\"tl_int\"])) + float(bx[\"assists\"]) +float(bx[\"turnovers\"])\n numerador = float(bx[\"assists\"])\n result = 0.00\n if denominador > 0:\n result = (numerador / denominador) * 100\n self.assists_ratio = \"%.2f\" % round(result, 2)",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def percentage(moderated, accepted):\n if moderated == accepted:\n return 100\n elif accepted == 0:\n return 0\n else:\n return round(float(accepted) / moderated * 100)",
"def calculate_fitness_percentage(selection):\n return get_percentage(fitness_function(selection)/fitness_function(init_loot()))",
"def set_player_health_percentage(self):\n self.playerHealthPercentage = (self.get_current_health()\n / self.get_max_health())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
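Assist percentage above estimates the share of teammate field goals a player assisted while on the floor, clamped to the 0-100 range. The same arithmetic sketched with plain arguments (names are illustrative):

def assist_pct(assists, fgm, minutes, team_minutes, team_fgm):
    # AST% = 100 * assists / (((minutes / (team minutes / 5)) * team FGM) - player FGM)
    if minutes <= 0:
        return 0.0
    denom = (minutes / (team_minutes / 5)) * team_fgm - fgm
    if denom <= 0:
        return 0.0
    result = 100 * assists / denom
    return round(result, 2) if result <= 100 else 0.0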
Method which calculates the Assists per Turnover ratio of a player | def set_assists_per_turnover(self):
bx = self.get_standard_stats()
ratio = bx["assists"]
if bx["turnovers"] > 0:
ratio = bx["assists"] / bx["turnovers"]
self.assists_per_turnover = "%.2f" % round(ratio, 2) | [
"def calc_ratio_of_moves(game, player):\n player_factor = 1\n opp_factor = 1\n player_moves = game.get_legal_moves(player)\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n if not opp_moves:\n return float(\"inf\")\n elif not player_moves:\n return float(\"-inf\")\n else:\n return float(player_factor * len(player_moves) / (opp_factor * len(opp_moves)))",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def set_assists_ratio(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n denominador = tcInt + (0.44 * float(bx[\"tl_int\"])) + float(bx[\"assists\"]) +float(bx[\"turnovers\"])\n numerador = float(bx[\"assists\"])\n result = 0.00\n if denominador > 0:\n result = (numerador / denominador) * 100\n self.assists_ratio = \"%.2f\" % round(result, 2)",
"def opp_strength(team):\n opponent_count = 0\n opponent_wins = 0\n\n gov_rounds = team.gov_team\n opp_rounds = team.opp_team\n\n for round_obj in gov_rounds.all():\n opponent_wins += tot_wins(round_obj.opp_team)\n opponent_count += 1\n for round_obj in opp_rounds.all():\n opponent_wins += tot_wins(round_obj.gov_team)\n opponent_count += 1\n\n if opponent_count > 0:\n return float(opponent_wins) / float(opponent_count)\n else:\n return 0.0",
"def opponent_match_win_percentage(self, standings=STANDINGS):\n tot = 0\n n = 0\n for opp in self.get_opponents(standings):\n n += 1\n tot += opp.match_win_percentage()\n return tot / (n or -1)",
"def getWinAttemptRatio(self) -> float:\n if self.attempts == 0:\n return 0\n return self.wins / self.attempts",
"def calculate_ratings(self):\n for match in self.matches:\n # Fetch the corresponding participants\n winner = self.participants[match.id_winner]\n loser = self.participants[match.id_loser]\n # Save their original ratings\n old_winner_rating = winner.rating\n old_loser_rating = loser.rating\n # Calculate new ratings\n winner.won(old_loser_rating)\n loser.lost(old_winner_rating)",
"def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def opponent_game_win_percentage(self, standings=STANDINGS):\n tot = 0\n n = 0\n for opp in self.get_opponents(standings):\n n += 1\n tot += opp.game_win_percentage()\n return tot / (n or -1)",
"def player_pair_vs_win_pcts(thresh=SAMPLE_THRESH, rounding=2, ex_ch=False, df=None):\n if df is None:\n df = fetch_game_log(parse_cols=True)\n if ex_ch:\n df = df[~(df[CHEESY_WIN] == 'Yes')]\n \n player_cnts = defaultdict(int)\n player_pair_opp_cnts = defaultdict(lambda: defaultdict(int))\n player_pair_win_cnts = defaultdict(lambda: defaultdict(int))\n for idx, row in df.iterrows():\n for role1 in ROLES:\n player1 = row[role1]\n player_cnts[player1] += 1\n for role2 in ROLES:\n if role1 == role2:\n continue\n player2 = row[role2]\n if player1 in [UNK, NA] or player2 in [UNK, NA]:\n continue\n\n if role1 in BADS and role2 not in BADS:\n player_pair_opp_cnts[player1][player2] += 1\n if row[WINNER] == BAD:\n player_pair_win_cnts[player1][player2] += 1\n if role1 not in BADS and role2 in BADS:\n player_pair_opp_cnts[player1][player2] += 1\n if row[WINNER] == GOOD:\n player_pair_win_cnts[player1][player2] += 1\n \n df = []\n players = list(player_pair_opp_cnts.keys())\n players = list(filter(lambda p: player_cnts[p] >= thresh, players))\n for p1 in players:\n df.append([])\n for p2 in players:\n if p1 == p2:\n df[-1].append(-1)\n elif player_pair_opp_cnts[p1][p2] >= thresh:\n df[-1].append(player_pair_win_cnts[p1][p2] / player_pair_opp_cnts[p1][p2])\n else:\n df[-1].append(-1)\n \n df = pd.DataFrame(df, columns=players, index=players)\n df = df.apply(lambda x: np.round(x, rounding))\n df.index = df.index.rename(\"Player\")\n \n return df.reset_index()",
"def get_win_rate(summoner_name, champion):\n\tID = get_account_ID(summoner_name)\n\tchamp_ID = get_champ_ID(champion)\n\tmatch_list = get_game_list(ID, champ_ID)\n\twins = 0.0\n\tfor i in match_list:\n\t\tif get_result(i, champ_ID) == True:\n\t\t\twins = wins + 1\n\treturn wins/len(match_list)",
"def update_win_ratio(self):\n if self.losses + self.wins == 0:\n self.win_ratio = 0.0\n else:\n self.win_ratio = float(self.wins)/(self.wins+self.losses)",
"def match_win_percentage(self, omit_bye=True):\n n = float(len(self.previous_opponents))\n mp = self.match_points\n if omit_bye and 'BYE' in self.previous_opponents:\n mp -= 3\n n -= 1\n return max(0.33, mp / (3 * n or -1))",
"def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp",
"def get_winrate(self, player: int) -> float:\r\n return self.__data[player, 5]",
"def get_overall_ratio(self, overall_wins, overall_losses) -> float:\n if overall_wins == 0 and overall_losses == 0:\n return 0\n else:\n return overall_wins / (overall_wins + overall_losses) * 100",
"def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)",
"def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
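The assists-per-turnover ratio above falls back to the raw assist count when a player has committed no turnovers. As a one-line sketch (name is illustrative):

def assists_per_turnover(assists, turnovers):
    # Divide assists by turnovers; with zero turnovers, return the assist count itself.
    return round(assists / turnovers, 2) if turnovers > 0 else float(assists)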
Method which calculates Assists Ratio of a player | def set_assists_ratio(self):
bx = self.get_standard_stats()
tcInt = float(bx["t2p_int"] + bx["t3p_int"])
denominador = tcInt + (0.44 * float(bx["tl_int"])) + float(bx["assists"]) + float(bx["turnovers"])
numerador = float(bx["assists"])
result = 0.00
if denominador > 0:
result = (numerador / denominador) * 100
self.assists_ratio = "%.2f" % round(result, 2) | [
"def calc_ratio_of_moves(game, player):\n player_factor = 1\n opp_factor = 1\n player_moves = game.get_legal_moves(player)\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n if not opp_moves:\n return float(\"inf\")\n elif not player_moves:\n return float(\"-inf\")\n else:\n return float(player_factor * len(player_moves) / (opp_factor * len(opp_moves)))",
"def funding_ratio(assets, liabilities, r):\n return pv(assets, r)/pv(liabilities, r)",
"def calculate_ratio(self):\r\n current_height = self.gameboard.winfo_height()\r\n current_width = self.gameboard.winfo_width()\r\n self.height_ratio = current_height / self.board_height\r\n self.width_ratio = current_width / self.board_width",
"def GetRatio(self):\n ...",
"def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)",
"def calc_assist_score(assists):\n return (((assists * ASSISTS_MULT) + ASSISTS_MOD) * assists) / ASSISTS_DIV",
"def getWinAttemptRatio(self) -> float:\n if self.attempts == 0:\n return 0\n return self.wins / self.attempts",
"def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()",
"def gain_ratio(attribute_name_var, instances_var):\n gain_value = information_gain(attribute_name_var, instances_var)\n intrinsic_information_value = intrinsic_information(attribute_name_var, instances_var)\n\n gain_ratio_value = gain_value / intrinsic_information_value\n return gain_ratio_value",
"def get_usage_ratio() :\n return float(AthleteResults._athlete_results_counter # Ratio of how often AthleteResults subclass was called/used \n / AthleteResults._processing_counter)",
"def set_assists_per_turnover(self):\n bx = self.get_standard_stats()\n ratio = bx[\"assists\"]\n if bx[\"turnovers\"] > 0:\n ratio = bx[\"assists\"] / bx[\"turnovers\"]\n self.assists_per_turnover = \"%.2f\" % round(ratio, 2)",
"def calculate_ratings(self):\n for match in self.matches:\n # Fetch the corresponding participants\n winner = self.participants[match.id_winner]\n loser = self.participants[match.id_loser]\n # Save their original ratings\n old_winner_rating = winner.rating\n old_loser_rating = loser.rating\n # Calculate new ratings\n winner.won(old_loser_rating)\n loser.lost(old_winner_rating)",
"def materialPercentage(material,player):\n whitescore,blackscore = material\n if player == 0:\n return (whitescore/(blackscore+whitescore))\n else:\n return (blackscore/(blackscore+whitescore))",
"def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def _calculate_acceptance_ratio(self):\n return torch.exp(\n self.params_nlp +\n self._momentum_density(self.momentums) -\n self.candidates_nlp -\n self._momentum_density(self.candidates_momentums))",
"def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)",
"def total_examined_ratio(self):\n return self._total_examined_ratio",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
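Assist ratio above is the percentage of a player's possession-ending events (field-goal attempts, 0.44 of free-throw attempts, assists, turnovers) that are assists. A standalone sketch of the same formula (names are illustrative):

def assist_ratio(assists, fga, fta, turnovers):
    # AST ratio = 100 * assists / (FGA + 0.44*FTA + assists + turnovers)
    denom = fga + 0.44 * fta + assists + turnovers
    return round(100 * assists / denom, 2) if denom > 0 else 0.0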
Method which calculates the Defensive Ratio of a player: the total points allowed per 100 possessions | def set_defensive_ratio(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
if bx["minutes"] > 0:
opp_fga = opp_team["t2p_int"] + opp_team["t3p_int"]
opp_fgm = opp_team["t2p_conv"] + opp_team["t3p_conv"]
try:
dor = Decimal(opp_team["reb_of"] / (opp_team["reb_of"] + team["reb_def"]))
except ZeroDivisionError:
print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC)
dor = 0
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
dor = 0
try:
dfg = Decimal(opp_fgm / opp_fga)
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
dfg = 0
try:
fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))
except:
fmwt = 0
stops1 = bx["steals"] + bx["block_shots"] * fmwt * (1 - Decimal('1.07') * dor) + bx["reb_def"] * (1 - fmwt)
try:
stops2 = (Decimal((opp_fga - opp_fgm - team["block_shots"]) / team["minutes"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team["turnovers"] - team["steals"]) / team["minutes"])) * bx["minutes"] + Decimal(bx["fouls_cm"] / team["fouls_cm"]) * Decimal('0.4') * opp_team["tl_int"] * (1 - Decimal(opp_team["tl_conv"] / opp_team["tl_int"]))**2
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
stops2 = 0
except InvalidOperation:
print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC)
stops2 = 0
stops = stops1 + stops2
poss = self.get_team_possessions()
if bx["minutes"] > 0:
stop_percentage = (float(stops) * float(opp_team["minutes"])) / (float(poss) * float(bx["minutes"]))
else:
stop_percentage = 0.00
opp_points = opp_team["t2p_conv"] * 2 + opp_team["t3p_conv"] * 3 + opp_team["tl_conv"]
team_defensive_rating = 100 * (float(opp_points) / poss)
try:
d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team["tl_conv"]) / float(opp_team["tl_int"])))**2) * float(opp_team["tl_int"])*0.4)
result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
d_pts_per_scposs = 0
result = 0.00
# print("dor: " + str(dor))
# print("dfg: " + str(dfg))
# print("fmwt: " + str(fmwt))
# print("stops1: " + str(stops1))
# print("stops2: " + str(stops2))
# print("stops: " + str(stops))
# print("poss: " + str(poss))
# print("stop_percentage: " + str(stop_percentage))
# print("opp_points: " + str(opp_points))
# print("team_defensive_rating: " + str(team_defensive_rating))
# print("d_pts_per_scposs: " + str(d_pts_per_scposs))
# print("drtg: " + str(result) + "\n")
else:
result = 0.00
self.drtg = "%.2f" % round(result, 2) | [
"def calc_ratio_of_moves(game, player):\n player_factor = 1\n opp_factor = 1\n player_moves = game.get_legal_moves(player)\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n if not opp_moves:\n return float(\"inf\")\n elif not player_moves:\n return float(\"-inf\")\n else:\n return float(player_factor * len(player_moves) / (opp_factor * len(opp_moves)))",
"def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / 
(team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount",
"def game_win_percentage(self, game_points_for_bye=GAME_POINTS_PER_BYE, omit_bye=True):\n n = float(self.games_played)\n gp = self.game_points\n if omit_bye and 'BYE' in self.previous_opponents:\n gp -= game_points_for_bye\n return max(0.33, gp / (3 * n or -1))",
"def edp_reward(self) -> float:",
"def percentage(moderated, accepted):\n if moderated == accepted:\n return 100\n elif accepted == 0:\n return 0\n else:\n return round(float(accepted) / moderated * 100)",
"def ratio(self):\n total_com = 0\n for item in self.metric_data:\n total_com += item[\"comm_vol\"]\n return self.fast_memory_use / (total_com + self.eps)",
"def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100",
"def materialPercentage(material,player):\n whitescore,blackscore = material\n if player == 0:\n return (whitescore/(blackscore+whitescore))\n else:\n return (blackscore/(blackscore+whitescore))",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def get_winrate(self, player: int) -> float:\r\n return self.__data[player, 5]",
"def _calculate_acceptance_ratio(self):\n self.candidates_nlp = self.closure()\n return torch.exp(self.params_nlp - self.candidates_nlp)",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def player_pnr_roll_def():\n endpoint = 'http://stats.nba.com/js/data/playtype/player_PRRollMan.js'\n response = utils.get_response(endpoint, None)\n return response['Deffensive']",
"def funding_ratio(assets, liabilities, r):\n return pv(assets, r)/pv(liabilities, r)",
"def lastProgessRate(self):\n\t\tp1 = self.tracker[-2]\n\t\tp2 = self.tracker[-1]\n\t\treturn (p1[1].cost - p2[1].cost) / (p2[0] - p1[0]) if len(self.tracker) > 1 else 0.0",
"def expected_value(held_dice, num_die_sides, num_free_dice):\r\n temp = gen_all_sequences(tuple(range(1, num_die_sides + 1)), num_free_dice)\r\n total = 0\r\n \r\n for item in temp:\r\n total += score(held_dice + item)\r\n return float(total)/len(temp)",
"def KPI(self, total=True):\n \n data = self.select_table('ChordLog')\n correct = data[data['PredictedLabel'] == data['ActualLabel']]\n\n # % correctly predicted in chord net\n human_level_performance = (len(correct) / len(data)) * 100\n \n # round value\n human_level_performance = round(human_level_performance, 4) \n \n return human_level_performance"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method which calculates the Offensive Ratio of a player: the total points scored per 100 possessions | def set_offensive_ratio(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
if bx["minutes"] > 0 and (bx["t2p_int"] + bx["t3p_int"]) > 0:
fgm = bx["t2p_conv"] + bx["t3p_conv"]
fga = bx["t2p_int"] + bx["t3p_int"]
team_fgm = team["t2p_conv"] + team["t3p_conv"]
team_fga = team["t2p_int"] + team["t3p_int"]
team_points = team["t2p_conv"]*2 + team["t3p_conv"]*3 + team["tl_conv"]
points = bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"]
try:
qAST = (Decimal(bx["minutes"] / (team["minutes"] / 5)) * (Decimal('1.14') * Decimal((team["assists"] - bx["assists"]) / team_fgm))) + \
Decimal((((team["assists"] / team["minutes"]) * bx["minutes"] * 5 - bx["assists"]) / ((team_fgm / team["minutes"]) * bx["minutes"] * 5 - fgm)) * (1 - (bx["minutes"] / (team["minutes"] / 5))))
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
qAST = 1
except InvalidOperation:
print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC)
qAST = 1
fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx["tl_conv"]) / (2 * fga)) * qAST)
try:
ast_part = Decimal('0.5') * Decimal(((team_points - team["tl_conv"]) - (points - bx["tl_conv"])) / (2*(team_fga - fga))) * bx["assists"]
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
ast_part = 0
if bx["tl_int"] > 0:
ft_part = Decimal(1 - (1 - (bx["tl_conv"] / bx["tl_int"]))**2) * Decimal('0.4') * bx["tl_int"]
else:
ft_part = 0
team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team["tl_conv"] / team["tl_int"]))**2) * team["tl_int"] * Decimal('0.4'))
try:
team_orb_percentage = Decimal(team["reb_of"] / (team["reb_of"] + ((opp_team["reb_def"] + opp_team["reb_of"]) - opp_team["reb_of"])))
except ZeroDivisionError:
print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC)
team_orb_percentage = 0
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
team_orb_percentage = 0
team_play_percentage = Decimal(team_scoring_poss / (team_fga + team["tl_int"] * Decimal('0.4') + team["turnovers"]))
try:
team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
team_orb_weight = 0
orb_part = bx["reb_of"] * team_orb_weight * team_play_percentage
fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)
if bx["tl_conv"] > 0:
ft_x_poss = Decimal((1 - (bx["tl_conv"] / bx["tl_int"]))**2) * Decimal('0.4') * bx["tl_int"]
else:
ft_x_poss = Decimal(1 - (bx["tl_conv"] / 1)**2) * Decimal('0.4') * bx["tl_int"]
try:
sc_poss = (fg_part + ast_part + ft_part) * (1 - (team["reb_of"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
sc_poss =0
tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx["turnovers"]
pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx["t3p_conv"]) * (1 - Decimal('0.5') * Decimal((points - bx["tl_conv"]) / (2 * fga)) * qAST)
try:
pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team["t3p_conv"] - bx["t3p_conv"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team["tl_conv"]) - (points - bx["tl_conv"])) / (2 * (team_fga - fga))) * bx["assists"]
except:
pprod_ast_part = 0
pprod_orb_part = bx["reb_of"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team["tl_conv"] / team["tl_int"])**2) * Decimal('0.4') * team["tl_int"]))
try:
pprod = (pprod_fg_part + pprod_ast_part + bx["tl_conv"]) * (1 - (team["reb_of"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
pprod = 0
try:
result = 100 * (pprod / tot_poss)
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
result = 0
# print("fgm: " + str(fgm))
# print("fga: " + str(fga))
# print("team_fgm: " + str(team_fgm))
# print("team_fga: " + str(team_fga))
# print("team_points: " + str(team_points))
# print("points: " + str(points))
# print("qAST: " + str(qAST))
# print("fg_part: " + str(fg_part))
# print("ast_part: " + str(ast_part))
# print("ft_part: " + str(ft_part))
# print("team_scoring_poss: " + str(team_scoring_poss))
# print("team_orb_percentage: " + str(team_orb_percentage))
# print("team_play_percentage: " + str(team_play_percentage))
# print("team_orb_weight: " + str(team_orb_weight))
# print("orb_part: " + str(orb_part))
# print("fg_x_poss: " + str(fg_x_poss))
# print("ft_x_poss: " + str(ft_x_poss))
# print("sc_poss: " + str(sc_poss))
# print("tot_poss: " + str(tot_poss))
# print("pprod_fg_part: " + str(pprod_fg_part))
# print("pprod_ast_part: " + str(pprod_ast_part))
# print("pprod_orb_part: " + str(pprod_orb_part))
# print("pprod: " + str(pprod))
# print("result: " + str(result) + "\n")
else:
result = 0.00
self.ortg = "%.2f" % round(result, 2)
if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:
"""For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG
using team's formula"""
print(BCOLORS.OKBLUE + "ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo" + BCOLORS.ENDC)
bx = self.get_standard_stats()
result = round((bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"])/self.get_team_possessions(), 2)
self.ortg = "%.2f" % result | [
"def calc_ratio_of_moves(game, player):\n player_factor = 1\n opp_factor = 1\n player_moves = game.get_legal_moves(player)\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n if not opp_moves:\n return float(\"inf\")\n elif not player_moves:\n return float(\"-inf\")\n else:\n return float(player_factor * len(player_moves) / (opp_factor * len(opp_moves)))",
"def offensive_rating(data_frame, mode):\n off_rat = dict()\n average_points = calculate_average_points(data_frame, mode)\n for k, possessions in possessions_home_away(data_frame, mode).items():\n try:\n off_rat[k] = format(float(average_points[k]) * 100 / float(possessions), '.2f')\n except ZeroDivisionError:\n off_rat[k] = 0.0\n return off_rat",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def formula(user_follows_opinion, total_followers_opinion, metadata):\n score = 0\n\n for politician in user_follows_opinion:\n score += 1 - (metadata['individual_follower_count'][politician]/total_followers_opinion)\n return score",
"def get_winrate(self, player: int) -> float:\r\n return self.__data[player, 5]",
"def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100",
"def game_win_percentage(self, game_points_for_bye=GAME_POINTS_PER_BYE, omit_bye=True):\n n = float(self.games_played)\n gp = self.game_points\n if omit_bye and 'BYE' in self.previous_opponents:\n gp -= game_points_for_bye\n return max(0.33, gp / (3 * n or -1))",
"def get_opinion_percent(self):\n return (self.get_percent()+100)/2",
"def p(party, vote_count, s):\n return t(party, vote_count) / d(s)",
"def resp_rate(self):\r\n props = np.zeros(self.q_no)\r\n for i in range(self.q_no):\r\n props[i] = self.prop_answered(i)\r\n \r\n return np.average(props)",
"def opp_strength(team):\n opponent_count = 0\n opponent_wins = 0\n\n gov_rounds = team.gov_team\n opp_rounds = team.opp_team\n\n for round_obj in gov_rounds.all():\n opponent_wins += tot_wins(round_obj.opp_team)\n opponent_count += 1\n for round_obj in opp_rounds.all():\n opponent_wins += tot_wins(round_obj.gov_team)\n opponent_count += 1\n\n if opponent_count > 0:\n return float(opponent_wins) / float(opponent_count)\n else:\n return 0.0",
"def get_overall_ratio(self, overall_wins, overall_losses) -> float:\n if overall_wins == 0 and overall_losses == 0:\n return 0\n else:\n return overall_wins / (overall_wins + overall_losses) * 100",
"def match_win_percentage(self, omit_bye=True):\n n = float(len(self.previous_opponents))\n mp = self.match_points\n if omit_bye and 'BYE' in self.previous_opponents:\n mp -= 3\n n -= 1\n return max(0.33, mp / (3 * n or -1))",
"def _calculate_acceptance_ratio(self):\n self.candidates_nlp = self.closure()\n return torch.exp(self.params_nlp - self.candidates_nlp)",
"def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()",
"def peirce_skill_score(self):\n n = float(self.table.sum())\n nf = self.table.sum(axis=1)\n no = self.table.sum(axis=0)\n correct = float(self.table.trace())\n return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2)",
"def player_efficiency_rating(self):\n return self._player_efficiency_rating",
"def funding_ratio(assets, liabilities, r):\n return pv(assets, r)/pv(liabilities, r)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
auth_data will be used as request_data in strategy | def set_input_data(self, request, auth_data):
request.auth_data = auth_data | [
"def get_oauth_data():",
"def authenticate(self, request):\n auth_data = super().authenticate(request)\n if not auth_data:\n return auth_data\n\n user, auth = auth_data\n\n if amr_claim := auth.data.get(\"amr\"):\n user.token_amr_claim = amr_claim\n\n return user, auth",
"def _get_auth_data(self, username):\n profile = model_access.get_profile(username)\n # get user's basic data\n url = self.settings.OZP['OZP_AUTHORIZATION']['USER_INFO_URL'] % (profile.dn, profile.issuer_dn)\n server_crt = self.settings.OZP['OZP_AUTHORIZATION']['SERVER_CRT']\n server_key = self.settings.OZP['OZP_AUTHORIZATION']['SERVER_KEY']\n r = self.requests.get(url, cert=(server_crt, server_key), verify=False)\n # logger.debug('hitting url %s for user with dn %s' % (url, profile.dn), extra={'request':request})\n\n if r.status_code != 200:\n raise errors.AuthorizationFailure('Error contacting authorization server: {0!s}'.format(r.text))\n\n user_data = r.json()\n\n user_json_keys = ['dn', 'formalAccesses', 'clearances', 'dutyorg', 'visas']\n for user_key in user_json_keys:\n if user_key not in user_data:\n raise ValueError('Endpoint {0!s} not return value output - missing key: {1!s}'.format(url, user_key))\n\n # convert dutyorg -> duty_org\n user_data['duty_org'] = user_data['dutyorg']\n user_data.pop('dutyorg', None)\n\n # convert formalAcccesses -> formal_accesses\n user_data['formal_accesses'] = user_data['formalAccesses']\n user_data.pop('formalAccesses', None)\n\n # get groups for user\n url = self.settings.OZP['OZP_AUTHORIZATION']['USER_GROUPS_URL'] % (profile.dn, self.settings.OZP['OZP_AUTHORIZATION']['PROJECT_NAME'])\n # logger.debug('hitting url %s for user with dn %s for group info' % (url, profile.dn), extra={'request':request})\n r = self.requests.get(url, cert=(server_crt, server_key), verify=False)\n if r.status_code != 200:\n raise errors.AuthorizationFailure('Error contacting authorization server: {0!s}'.format(r.text))\n\n group_data = r.json()\n\n if 'groups' not in group_data:\n raise ValueError('Endpoint {0!s} not return value output - missing key: {1!s}'.format(url, 'groups'))\n\n groups = group_data['groups']\n user_data['is_org_steward'] = False\n user_data['is_apps_mall_steward'] = False\n user_data['is_metrics_user'] = False\n user_data['is_beta_user'] = False\n\n for g in groups:\n if self.settings.OZP['OZP_AUTHORIZATION']['APPS_MALL_STEWARD_GROUP_NAME'] == utils.find_between(g, 'cn=', ','):\n user_data['is_apps_mall_steward'] = True\n if self.settings.OZP['OZP_AUTHORIZATION']['ORG_STEWARD_GROUP_NAME'] == utils.find_between(g, 'cn=', ','):\n user_data['is_org_steward'] = True\n if self.settings.OZP['OZP_AUTHORIZATION']['METRICS_GROUP_NAME'] == utils.find_between(g, 'cn=', ','):\n user_data['is_org_steward'] = True\n if self.settings.OZP['OZP_AUTHORIZATION']['BETA_USER_GROUP_NAME'] == utils.find_between(g, 'cn=', ','):\n user_data['is_beta_user'] = True\n return user_data",
"def add_auth(self, http_request):\r\n pass",
"def get_auth_data(self, headers):\n if self.token is None:\n auth = {'password': self.password}\n if self.user_id:\n auth['userId'] = self.user_id\n elif self.user_name:\n auth['username'] = self.user_name\n return {'passwordCredentials': auth}\n headers['X-Auth-Token'] = self.token\n return {'token': {'id': self.token}}",
"def auth_data(server_ip):\n pass",
"def fake_auth_complete(self, strategy):\r\n args = ()\r\n kwargs = {\r\n 'request': strategy.request,\r\n 'backend': strategy.backend,\r\n 'user': None,\r\n 'response': self.get_response_data(),\r\n }\r\n return strategy.authenticate(*args, **kwargs)",
"def auth_extra_arguments(self):\n extra_arguments = super().auth_extra_arguments()\n extra_arguments[\"p\"] = self.policy or self.data.get(\"p\")\n return extra_arguments",
"def __auth_data(self):\n return {'customer_name': self.customer, 'user_name': self.username,\n 'password': self.__cipher.decrypt(self.password)}",
"def set_auth_data(self, auth_data):\n d_auth_data = Data(auth_data)\n self._lib_vscf_aes256_gcm.vscf_aes256_gcm_set_auth_data(self.ctx, d_auth_data.data)",
"def get_data(self, **request_parameters):\n\n if self._is_auth_request(**request_parameters):\n return {\n 'username': request_parameters['username'],\n 'password': request_parameters['password']\n }\n else:\n return request_parameters[\"data\"]",
"def _get_auth_string(self):",
"def auth_ok(self, data):\n\n pass",
"def update_auth_data(self, auth_data: AuthData) -> None:\n self.auth_data.update(auth_data)\n if \"refresh_id\" in self.auth_data:\n self.set_cookie(COOKIE_NAME, self.auth_data[\"refresh_id\"])\n if self.on_auth_data_changed:\n self.on_auth_data_changed(self.auth_data)",
"def get_auth_data():\n result = {\n 'storage_url': None,\n 'auth_token': None}\n\n endpoint_config = UserAuthConfig()\n user_config = UserConfig()\n objectstorage_config = ObjectStorageConfig()\n auth_provider = AuthProvider()\n access_data = auth_provider.get_access_data(\n endpoint_config, user_config)\n\n if endpoint_config.strategy.lower() == 'saio_tempauth':\n result['storage_url'] = access_data.storage_url\n result['auth_token'] = access_data.auth_token\n else:\n service = access_data.get_service(\n objectstorage_config.identity_service_name)\n endpoint = service.get_endpoint(objectstorage_config.region)\n result['storage_url'] = endpoint.public_url\n result['auth_token'] = access_data.token.id_\n\n return result",
"def get_auth_data(self, session, auth, headers, **kwargs):\n headers['Openstack-Auth-Receipt'] = self.receipt\n return (None, None)",
"def get_request_and_strategy(self, auth_entry=None, redirect_uri=None):\r\n request = self.request_factory.get(\r\n pipeline.get_complete_url(self.backend_name) +\r\n '?redirect_state=redirect_state_value&code=code_value&state=state_value')\r\n request.user = auth_models.AnonymousUser()\r\n request.session = cache.SessionStore()\r\n request.session[self.backend_name + '_state'] = 'state_value'\r\n\r\n if auth_entry:\r\n request.session[pipeline.AUTH_ENTRY_KEY] = auth_entry\r\n\r\n strategy = social_utils.load_strategy(backend=self.backend_name, redirect_uri=redirect_uri, request=request)\r\n request.social_strategy = strategy\r\n\r\n return request, strategy",
"def UserAuthorizationRequestReceived(self, User):",
"def set_auth(self, request, auth):\n request.auth = auth"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that only 'admin' can add a product | def test_only_admin_can_create_product(self):
resp = self.admin_create_user()
reply = self.attendant_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Unauthorized Access!')
self.assertEqual(resp.status_code, 401) | [
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_admin_product_info(self):\n self.add_product(self.TESTPRODUCT1, 1)\n\n # Missing product\n rv = self.app.get('/admin/product/nothing', follow_redirects=True)\n assert b'Produkten existerar inte!' in rv.data\n\n # Existing product\n rv = self.app.get('/admin/product/%s' % self.TESTPRODUCT1['barcode'], follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data",
"def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)",
"def setUp(self):\r\n super(EditProductTest, self).setUp()\r\n self.product = self.F.ProductFactory.create()\r\n self.add_perm(\"manage_products\")",
"def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_create_sale_as_admin(self):\n response = self.client().post(self.route_products,\n headers=self.generate_admin_token(),\n data=json.dumps(self.product),\n content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('Product Successfully Created', str(response.data))\n response = self.client().post(self.route_sales,\n headers=self.generate_admin_token(),\n data=json.dumps(self.sale),\n content_type='application/json')\n self.assertEqual(response.status_code, 403)\n self.assertIn('Sorry, this route is only accessible to sales attendants', str(response.data))",
"def test_admin_product_list(self):\n # No products\n rv = self.app.get('/admin/product')\n assert rv.status_code == 200\n\n # More than 0 products\n self.add_product(self.TESTPRODUCT1, 1)\n self.add_product(self.TESTPRODUCT2, 2)\n rv = self.app.get('/admin/product', follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data\n assert self.TESTPRODUCT2['name'] in rv.data",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_admin(self):\n assert(admin)",
"def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()",
"def test_add_product(self):\n self.api_client.login(username=self.username, password=self.password)\n path = reverse('product:product_add')\n response = self.api_client.get(\n path\n )\n form = response.context['form']\n self.assertTemplateUsed(response, 'product/add-product.html')\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(form, ProductForm)",
"def test_c_dont_adds_if_already_in_cart(self, client, product, admin_user):\n cart = Cart.objects.create(owner=admin_user)\n Line.objects.create(cart=cart, product=product)\n\n client.force_login(admin_user)\n\n response = client.post(\n reverse('shoppingcart:add-product'),\n json.dumps({'id_': product.pk}),\n content_type='application/json',\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n assert response.status_code == 400\n assert product.line_set.all().exists() is True",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_add_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.add_facility'))",
"def test_add_admin_to_org(self):\n pass",
"def test_non_admin_create_one_equipment(self):\n self.client.login(username='ordinary_user', password='ordinary_password')\n founder = User.objects.get(username='ordinary_user')\n initial_equipments = Equipment.objects.count()\n url = reverse('equipments_list')\n data = {\n 'name': 'box',\n 'founder': founder.pk\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(Equipment.objects.count(), initial_equipments)",
"def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that 'admin' can add a product | def test_admin_create_product(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201) | [
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_product_info(self):\n self.add_product(self.TESTPRODUCT1, 1)\n\n # Missing product\n rv = self.app.get('/admin/product/nothing', follow_redirects=True)\n assert b'Produkten existerar inte!' in rv.data\n\n # Existing product\n rv = self.app.get('/admin/product/%s' % self.TESTPRODUCT1['barcode'], follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_product_list(self):\n # No products\n rv = self.app.get('/admin/product')\n assert rv.status_code == 200\n\n # More than 0 products\n self.add_product(self.TESTPRODUCT1, 1)\n self.add_product(self.TESTPRODUCT2, 2)\n rv = self.app.get('/admin/product', follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data\n assert self.TESTPRODUCT2['name'] in rv.data",
"def setUp(self):\r\n super(EditProductTest, self).setUp()\r\n self.product = self.F.ProductFactory.create()\r\n self.add_perm(\"manage_products\")",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_add_product(self):\n self.api_client.login(username=self.username, password=self.password)\n path = reverse('product:product_add')\n response = self.api_client.get(\n path\n )\n form = response.context['form']\n self.assertTemplateUsed(response, 'product/add-product.html')\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(form, ProductForm)",
"def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()",
"def test_create_sale_as_admin(self):\n response = self.client().post(self.route_products,\n headers=self.generate_admin_token(),\n data=json.dumps(self.product),\n content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('Product Successfully Created', str(response.data))\n response = self.client().post(self.route_sales,\n headers=self.generate_admin_token(),\n data=json.dumps(self.sale),\n content_type='application/json')\n self.assertEqual(response.status_code, 403)\n self.assertIn('Sorry, this route is only accessible to sales attendants', str(response.data))",
"def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)",
"def test_register_product():\n assert Product.objects.count() == 0\n Product.objects.create(\n product_name = 'Starwars', unit_price = 10, multiple = 1\n )\n assert Product.objects.count() == 1",
"def test_add_product(self):\r\n u = self.F.UserFactory()\r\n\r\n f = self.form(\r\n {\r\n \"name\": \"Two\",\r\n \"version\": \"1.0\",\r\n \"description\": \"not blank\",\r\n \"cc_version\": \"0\",\r\n },\r\n user=u)\r\n\r\n product = f.save()\r\n\r\n self.assertEqual(product.name, \"Two\")\r\n self.assertEqual(product.description, \"not blank\")\r\n self.assertEqual(product.created_by, u)\r\n\r\n version = product.versions.get()\r\n\r\n self.assertEqual(version.version, \"1.0\")\r\n self.assertEqual(version.created_by, u)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin(self):\n assert(admin)",
"def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_add_admin_to_org(self):\n pass",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test admin cannot create a product with a blacklisted token | def test_cannot_create_product_with_blacklisted_token(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
resp = self.client.delete(
'/api/v1/logout',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'You are successfully logged out!')
self.assertEqual(resp.status_code, 200)
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
self.assertEqual(resp.status_code, 401) | [
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n 'customer@customer.com',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_missing_token(self):\n\n self.register_test_admin_account()\n token = \"\"\n\n response = self.app_test_client.get(\n '{}/products'.format(self.BASE_URL),\n headers=dict(Authorization=token),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(helper_functions.convert_response_to_json(\n response)[\"Message\"], \"You need to login\")",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_product_cannot_create_with_invalid_details(self):\n res = self.client().post('/api/v1/products', data=json.dumps(self.empty_product), headers = {\"content-type\": \"application/json\"})\n self.assertEqual(res.status_code, 201)",
"def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_create_without_token(self):\n url = '/api/ingredients/'\n client = APIClient()\n\n response = client.post(url, self.ingredient_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |