Column     Type    Size range
query      string  9 to 9.05k characters
document   string  10 to 222k characters
negatives  list    19 to 20 items
metadata   dict    -
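Each row below pairs one natural-language query with one positive code document, a list of hard-negative code documents, and a metadata objective describing the (query, document, negatives) triplet. A minimal sketch of how rows with this schema might be consumed as training triplets; the JSONL path and field access are assumptions, since the dataset's storage format is not shown here:

import json

# Hypothetical path: the actual filename/format of this dataset is not given above.
DATA_PATH = "triplets.jsonl"

def iter_triplets(path=DATA_PATH):
    # Yield (query, document, negatives) tuples from a JSONL dump of rows like the ones below.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            yield row["query"], row["document"], row["negatives"]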
Return the directory associated with the patient, study and series of the DICOM data set.
def get_series_directory(self, data_set):
    specific_character_set = (
        data_set.as_string(odil.registry.SpecificCharacterSet)
        if odil.registry.SpecificCharacterSet in data_set
        else odil.Value.Strings())

    def decode(value):
        return odil.as_unicode(value, specific_character_set)

    # Patient directory: <PatientName> or <PatientID>.
    patient_directory = decode(
        data_set.get(
            odil.registry.PatientName,
            data_set.get(odil.registry.PatientID))[0])

    # Study directory: <StudyID>_<StudyDescription>, both parts are
    # optional. If both tags are missing or empty, raise an exception
    study_directory = []
    study_id = data_set.get(odil.registry.StudyID)
    if study_id:
        study_directory.append(decode(study_id[0]))
    study_description = data_set.get(odil.registry.StudyDescription)
    if study_description:
        study_directory.append(decode(study_description[0]))
    if not study_directory:
        raise Exception("Study ID and Study Description are both missing")
    study_directory = "_".join(study_directory)

    # Series directory: <SeriesNumber>_<SeriesDescription>, both parts are
    # optional. If both tags are missing or empty, raise an exception
    series_directory = []
    reconstruction = None
    series_number = data_set.get(odil.registry.SeriesNumber)
    if series_number:
        series_number = series_number[0]
        if series_number > 2**16:
            # Bruker ID based on experiment number and reconstruction number
            # is not readable: separate the two values
            experiment, reconstruction = divmod(series_number, 2**16)
            series_directory.append(str(experiment))
        else:
            series_directory.append(str(series_number))
    if not self.iso_9660:
        series_description = data_set.get(odil.registry.SeriesDescription)
        if series_description:
            series_directory.append(decode(series_description[0]))
    if not series_directory:
        raise Exception(
            "Series Number and Series Description are both missing")
    series_directory = "_".join(series_directory)

    if self.iso_9660:
        patient_directory = self.to_iso_9660(patient_directory)
        study_directory = self.to_iso_9660(study_directory)
        series_directory = self.to_iso_9660(series_directory)

    if reconstruction is not None:
        series_directory = os.path.join(
            series_directory, str(reconstruction))

    return os.path.join(patient_directory, study_directory, series_directory)
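One detail worth a worked example: for Bruker data the series number packs the experiment and reconstruction numbers into a single integer, and divmod with 2**16 splits the high and low 16-bit halves apart. A quick illustration with made-up values:

# A Bruker-style packed identifier: experiment 3, reconstruction 1 (invented values).
series_number = 3 * 2**16 + 1
experiment, reconstruction = divmod(series_number, 2**16)
assert (experiment, reconstruction) == (3, 1)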
[ "def _get_all_dicom_filepaths(patient_root_dir=''):\n walk = os.walk(patient_root_dir)\n \n dicom_filepaths = []\n for w in walk: \n dicom_files = [f for f in w[-1] if '.dcm' in f] # extracts .dcm files \n basepath = pathlib.Path(w[0]) # extracts the basepath \n full_paths = [os.path.join(basepath, df) for df in dicom_files] # combines the basepath with each .dcm file \n for f in full_paths:\n dicom_filepaths.append(f) # load into master list \n \n return dicom_filepaths", "def GetDirectory(self) -> \"char const *\":\n return _ITKIOImageBaseBasePython.itkRegularExpressionSeriesFileNames_GetDirectory(self)", "def get_session_dirs(activity, dss, year, doy):\n obs_suffix = \"dss%2d/%4d/%03d/\" % (dss, year, doy)\n activity_dir = act_proj_path + activity + \"/\"+obs_suffix\n project = activity_project(activity)\n project_dir = sci_proj_path + get_auto_project(project) + \"/\"\n session_dir = project_dir+obs_suffix # observing session working files\n wvsrdir = wvsr_dir + make_datadir_name(session_dir)\n fftdir = get_Naudet_FFT_dir(session_dir)\n # define data directory for data structure files (load or dump)\n projectname = get_real_project(project)\n realdir = projects_dir+projectname+\"/Observations/\"+obs_suffix\n datadir = fits_dir+obs_suffix\n return session_dir, realdir, activity_dir, project_dir, datadir, wvsrdir, fftdir", "def _getDataStorageDirectoryName(self):\n return self.COMPLEMENTARY_FILES", "def get_datasets_paths(self):\n global dir_covid\n global dir_normal\n\n #Loop through directories, subdirs and files for dir, subdir, file in os.walk(self.path)L\n\n for dir, subdir, file in os.walk(self.path):\n\n #Register last folder\n last_folder = os.path.basename(os.path.normpath(dir))\n\n #Check if last folder is covid\n if last_folder == 'covid':\n dir_covid = dir\n\n #Check if last folder is normal\n elif last_folder == 'normal':\n dir_normal = dir\n\n elif last_folder == 'saved':\n dir_saved = dir\n\n return dir_covid, dir_normal, dir_saved", "def get_dir(self, file=''):\n return self.data_dir + file", "def get_dicom(path):\n if not os.path.isdir(path):\n return None\n\n for root, dirs, files in os.walk(path):\n for fname in files:\n if '.dcm' in fname:\n return os.path.join(root, fname)\n return None", "def _get_dataset_dir(dataset_name, data_dir=None):\n if not data_dir:\n data_dir = os.path.join(os.getcwd(), 'Data')\n data_dir = os.path.join(data_dir, dataset_name)\n return data_dir", "def _get_directory(group: ParseResult) -> str:\n files = group[0]\n if len(files) == 1:\n return os.path.dirname(files[0])\n else:\n return os.path.commonpath(files)", "def _get_data_dirs(self):\n\t\tsubfolders = {\"positive\": [\"positive_R\", \"positive_L\"],\n\t\t\t\t\t\"mixed\": [\"positive_R\", \"positive_L\", \"negative_R\", \"negative_L\"]}\n\t\tdata_folder_paths = {sub: os.path.join(self.split_dir, sub) for sub in subfolders[self.label]}\n\t\treturn data_folder_paths", "def ai_data_directory(self) -> pathlib.Path:", "def get_dicoms(series_path: str) -> List[Types.SeriesObj]:\n try:\n dicoms = []\n for dicom in list(filter(lambda x: \".dcm\" in x, os.listdir(series_path))):\n d = process_local_DICOM(f\"{series_path}{dicom}\")\n dicoms.append(d)\n\n return dicoms\n except Exception as e:\n _logger.error(\n f\"An error occurred when acquiring Dicom's for {series_path}. Error: {e}. 
Must rerun to acquire data.\"\n )\n raise DICOMAccessError()", "def get_dir(self, year: int) -> Path:\n if year not in self.dbc_path:\n raise ValueError(f\"No ferc1 data for year {year}\")\n return self.dbc_path[year]", "def dirs_of(self, directory):\n return self.listings[directory]['dirs']", "def inspect_dicom_series(root_dir: str):\n import SimpleITK as sitk\n\n found_series = {}\n for d in os.walk(root_dir):\n dir = d[0]\n reader = sitk.ImageSeriesReader()\n sitk.ProcessObject_SetGlobalWarningDisplay(False)\n series_found = reader.GetGDCMSeriesIDs(dir)\n sitk.ProcessObject_SetGlobalWarningDisplay(True)\n logger.info(f\"Found {len(series_found)} series in directory {dir}\")\n for serie in series_found:\n dicom_names = reader.GetGDCMSeriesFileNames(dir, serie)\n found_series[serie] = dicom_names\n logger.info(f\"Total {len(found_series)} series in directory {root_dir}\")\n return found_series", "def get_url_directory(self):\n \n # get the directory path of the url\n fulldom = self.get_full_domain()\n urldir = fulldom\n\n if self.dirpath:\n newpath = \"\".join((self.URLSEP, \"\".join([ x+'/' for x in self.dirpath])))\n urldir = \"\".join((fulldom, newpath))\n\n return urldir", "def get_data_dirs():\n from pydoas import _LIBDIR \n return listdir(join(_LIBDIR, \"data\"))", "def get_galaxy_galaxydir(cat, datadir=None, htmldir=None, html=False, resampled=False):\n if datadir is None:\n datadir = legacyhalos.io.legacyhalos_data_dir()\n if htmldir is None:\n htmldir = legacyhalos.io.legacyhalos_html_dir()\n\n if type(cat) is astropy.table.row.Row:\n ngal = 1\n galaxy = [cat[GALAXYCOLUMN]]\n plate = [cat['PLATE']]\n else:\n ngal = len(cat)\n galaxy = cat[GALAXYCOLUMN]\n plate = cat['PLATE']\n\n if resampled:\n # need to fix the plate!\n galaxydir = np.array([os.path.join(datadir, 'resampled', str(plt), gal) for gal, plt in zip(galaxy, plate)])\n #galaxydir = np.array([os.path.join(datadir, 'resampled', get_plate(plt), gal) for gal, plt in zip(galaxy, plate)])\n else:\n galaxydir = np.array([os.path.join(datadir, str(plt), gal) for gal, plt in zip(galaxy, plate)])\n #galaxydir = np.array([os.path.join(datadir, get_plate(plt), gal) for gal, plt in zip(galaxy, plate)])\n \n if html:\n htmlgalaxydir = np.array([os.path.join(htmldir, str(plt), gal) for gal, plt in zip(galaxy, plate)])\n #htmlgalaxydir = np.array([os.path.join(htmldir, get_plate(plt), gal) for gal, plt in zip(galaxy, plate)])\n\n if ngal == 1:\n galaxy = galaxy[0]\n galaxydir = galaxydir[0]\n if html:\n htmlgalaxydir = htmlgalaxydir[0]\n\n if html:\n return galaxy, galaxydir, htmlgalaxydir\n else:\n return galaxy, galaxydir", "def dir_logs(self):\n d = self.dir_dettype()\n return os.path.join(d, self.dirname_log)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an ISO 9660-compatible version of the input string.
def to_iso_9660(self, value):
    value = value[:8].upper()
    value = re.sub(r"[^A-Z0-9_]", "_", value)
    return value
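For intuition, a standalone sketch of the same truncate/uppercase/substitute sequence (method copied without self for illustration; the sample inputs are invented):

import re

def to_iso_9660(value):
    # Keep the first 8 characters, uppercase them, and replace anything
    # outside A-Z, 0-9 and underscore with an underscore.
    value = value[:8].upper()
    value = re.sub(r"[^A-Z0-9_]", "_", value)
    return value

assert to_iso_9660("t1_flair localizer") == "T1_FLAIR"
assert to_iso_9660("3-plane loc") == "3_PLANE_"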
[ "def format_filename(s):\n valid_chars = \"-_. %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n return filename", "def from_iso(self) -> str:\n return self._frm_iso", "def makestr(string: str):\n if os.path.isfile(string) or os.path.isdir(string):\n return makepath(string)\n return NonePath(string)", "def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename", "def getStringFromLongFilename(firstChars, secondChars, thirdChars): \n #TODO: This is not 100% correct... .\n filename = \"\"\n for i in range(0, len(firstChars)-1,2):\n if firstChars[i] != 0xff and firstChars[i] != 0x00:\n filename += chr(firstChars[i])\n for i in range(0, len(secondChars)-1,2):\n if secondChars[i] != 0xff and secondChars[i] != 0x00:\n filename += chr(secondChars[i])\n for i in range(0, len(thirdChars)-1,2):\n if thirdChars[i] != 0xff and thirdChars[i] != 0x00:\n filename += chr(thirdChars[i])\n return filename", "def wsi_filename_to_patient(wsi_filename):\n\n wsi_filename = os.path.basename(wsi_filename)\n return \"-\".join(wsi_filename.split(\"-\")[:3])", "def cs2iso(datestring):\n from datetime import datetime # http://stackoverflow.com/a/9182195\n datestring = datestring.replace('. ', '.')\n try:\n return datetime.strptime(datestring, '%d.%m.%Y').date().isoformat()\n except ValueError:\n return datetime.strptime(datestring, '%d.%m.%Y %H:%M').isoformat(' ')", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('folder', type=str, help='Folder to ISOse')\n\n args = parser.parse_args()\n\n from io import BytesIO\n\n if not os.path.exists(args.folder):\n return \n lastname = os.path.split(args.folder)[-1]\n\n time_prefix = datetime.datetime.now().replace(microsecond=0).isoformat().replace(':', '-')\n isoname = \"%(time_prefix)s-%(lastname)s.iso\" % vars()\n\n iso = pycdlib.PyCdlib()\n\n iso.new(#joliet=True, \n #rock_ridge='1.09', \n udf='2.60',\n interchange_level=4)\n #directory\n\n exclude_dirs = ['.git', '.vagrant', 'tmp']\n\n for root, dirnames, filenames in os.walk(args.folder):\n ok = True\n for ex_ in exclude_dirs:\n if os.path.sep + ex_ + os.path.sep in root:\n ok = False\n break\n\n if not ok:\n continue \n\n for directory in dirnames:\n if directory in exclude_dirs:\n continue\n if '.git' in root:\n continue\n if '.vagrant' in root:\n continue\n direct = \"/\" + directory\n print(direct)\n iso.add_directory(\n direct,\n #joliet_path=f'/{direct}', \n #rr_name=directory\n )\n for file_ in filenames:\n file = '' + file_\n print(os.path.join(root, file))\n iso.add_file(\n os.path.join(root, file),\n f'/{file};3',\n #joliet_path=f'/{file}',\n #rr_name=file\n )\n # current_total_size += path.getsize(path.join(root, file))\n\n iso.write(isoname)\n iso.close()\n return 0", "def fixed_filename(filename):\n res = FOUR_NUM_END.match(filename)\n if res is None:\n return None\n return res.expand(r'\\1\\2-\\3.\\4')", "def volume(str):\n\treturn (str.count('|') * str.count('-') * str.count('L'))/27", "def gen_path(fdp):\n s1 = fdp.ConsumeUnicodeNoSurrogates(15)\n while s1.count(\"/\") > 4:\n # Replace the first occurrence of /\n s1 = s1.replace(\"/\", \"a\", 1)\n return s1", "def test_make_8dot3_name_noext():\n fde = mock.MagicMock()\n fde._encoding = \"ASCII\"\n fde.get_entries.return_value = ([], [], [])\n sfn = EightDotThree()\n lfn = 
sfn.make_8dot3_name(\"This is a long filename\", fde)\n assert \"THISIS\" == lfn\n assert sfn.is_8dot3_conform(lfn)", "def _get_iso_image_name(node):\n return \"boot-%s.iso\" % node.uuid", "def valid_file_name_linux(name:str,default_char:str=\"-\") -> str:\r\n if default_char in invalid_linux_char:\r\n default_char = \"-\"\r\n tabla = {ord(c):default_char for c in invalid_linux_char}\r\n name = name.translate(tabla).strip()\r\n return name if name else \"archivo\"", "def getfilesystemencoding():\n\tpass", "def convertDateToISO(date):\n if re.search(\"^[0-9]{2}/[0-9]{2}/[0-9]{4}$\", date):\n date = date[6:10] + \"-\" + date[3:5] + \"-\" + date[0:2]\n else:\n raise ValueError(\"Incorrect date format\")\n return date", "def Ida2Olly(sig) -> str:\n\n pattern = []\n\n for entry in sig.split(' '):\n if entry == '?':\n pattern.append('??')\n else:\n pattern.append(entry)\n\n return \" \".join(pattern)", "def generate_isoform_sequence(reference_sequence, mutation_positions, isoform):\n\n if len(reference_sequence) == 0:\n return ''\n\n # The length of \"mut positions\" and \"isoform\" string should be identical.\n if len(mutation_positions) != len(isoform):\n return ''\n\n isoform_sequence_list = list(reference_sequence)\n isoform_index = 0\n for position in mutation_positions:\n isoform_sequence_list[position - 1] = isoform[isoform_index]\n isoform_index += 1\n\n return \"\".join(isoform_sequence_list)", "def make_filename_safe(filename):\n allowed_length = 255 # windows doesn't support more than 255 character filenames\n allowed_chars = string.ascii_letters + string.digits + \"~ -_.()\"\n safe_filename = ''.join(c for c in filename if c in allowed_chars)\n return safe_filename[:allowed_length]", "def get_filename() -> str:\n filename = input(\"Enter the journal filename: \")\n return filename" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function gets all album dates of a user
def db_annotater_get_user_album_dates(albums_queryset):
    # analyse the queryset of all albums of a user
    latest_date = ""  # datetime.now().date()
    submit_dates = []
    unsubmit_dates = []
    latest_album = None
    for album_date in albums_queryset:
        if album_date['annotation'] is True:
            new_date = get_date_dash_d_m_y(album_date['capture_date'])
            submit_dates.append(new_date)
        else:
            new_date = get_date_dash_d_m_y(album_date['capture_date'])
            unsubmit_dates.append(new_date)
    if len(albums_queryset) > 0:
        latest_album = albums_queryset.reverse()[0]
        latest_date = latest_album['capture_date']
        latest_date = get_date_dash_d_m_y(latest_date)
        latest_album_id = latest_album['id']
    album_dates = {'ld': latest_date, 's': submit_dates, 'u': unsubmit_dates}
    return (latest_album, album_dates)
[ "def db_annotater_get_latest_user_albums(album_date):\n\tstart_at\t= album_date['start_at']\n\tend_at\t\t= album_date['end_at']\n\t(hours, mins, secs) = get_time_diff_h_m_s(start_at, end_at)\n\twear_time \t= [{\"hours\":str(hours),\"minutes\":str(mins)}]\n\talbum_id \t= album_date['id']\n\tif album_date['annotation'] is True:\n\t\tsubmitted = \"Yes\"\n\telse:\n\t\tsubmitted = \"No\"\n\tcapture_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t# get images\n\timages \t\t= db_annotater_get_album_images(album_id)\n\tone_album \t= {\"wearTime\" : wear_time, \\\n\t\t\t\t\"submitted\" : submitted, \\\n\t\t\t\t\"date\" : capture_date, \\\n\t\t\t\t\"images\" : images}\n\treturn [one_album]", "def get_dates(decks: QuerySet) -> List[date]:\n return list(decks.values_list(\"date_created\", flat=True).distinct())", "def dates(self):\n drs = self._data_record_class.objects.filter(**self._kwargs()).values('date').distinct()\n return [d['date'] for d in drs]", "def getTopAlbums(self, user=None, period=\"overall\"):\n pass", "def vk_get_album_list(request):\n if not request.user.is_superuser:\n return redirect('%s?next=%s' % (reverse('dc_parse:admin_auth'), request.path))\n vk_token,vk_user = get_vk_cookies(request)\n method_name = 'photos.getAlbums'\n parameters = {\n 'owner_id': vk_user,\n 'need_covers': 1,\n 'need_system': 1,\n }\n content = vk_method(method_name,vk_token,parameters)\n\n albums = content['items']\n for album in albums:\n album['created'] = psql_time(album.get('created')) if isinstance(album.get('created'),int) else None\n album['updated'] = psql_time(album.get('updated')) if isinstance(album.get('updated'),int) else None\n\n return render(request,'vk_get_album_list.html',{\n # 'content': content,\n 'albums': content['items'],\n # 'album': album,\n # 'tags': tags,\n # 'resume': resume\n })", "def get_album_photos(self, user_id, photoset_id, page=1):\n\n resp = requests.get(Flickr.REST_BASE_URL, params={\n \"method\": \"flickr.photosets.getPhotos\",\n \"api_key\": self.__apikey,\n \"user_id\": user_id,\n \"photoset_id\": photoset_id,\n \"format\": \"json\",\n \"nojsoncallback\": 1,\n \"extras\": \" url_sq,url_t,url_s,url_q,url_m,url_n,url_z,url_c,url_l,url_o,description,tags,owner_name,license\",\n \"per_page\": self.page_size,\n \"page\": page\n })\n\n if resp.status_code != 200:\n raise Exception(\"Error fetching Flickr album photo list. Status code: %s\"%(resp.status_code))\n\n ps = resp.json()\n\n if ps[\"stat\"] != \"ok\":\n raise Exception(\"Error fetching Flickr album photo list. 
Reason: %s\"%ps[\"message\"])\n\n return ps", "def get(self): \n return getAllAlbums()", "def make_timeline_data(self,user):\n annos = json.loads(self.user_annos.get(user))\n dates = [a['updated'] for a in annos]\n dates = [parser.parse(date) for date in dates]\n dates.sort()\n dates = dates\n \n first = dates[0]\n last = dates[-1]\n \n def perdelta(start, end, delta):\n curr = start\n while curr < end:\n yield curr.strftime('%Y-%m-%d')\n curr += delta\n \n day_dict = defaultdict(int)\n for date in dates:\n day = date.strftime('%Y-%m-%d')\n day_dict[day] += 1\n \n for day in perdelta(first, last, timedelta(days=1)):\n if day_dict.has_key(day) == False:\n day_dict[day] = 0\n \n days = day_dict.keys()\n days.sort()\n counts = [day_dict[day] for day in days]\n return counts, days", "def get_dates(filename):\n try:\n #TODO: we changed the file to part 1 just to delete old files!\n songs = pd.read_json(filename, orient='table')\n except AssertionError as err:\n raise err\n dates = list(songs['Date'])\n albums = songs[\"Album\"]\n artists = songs[\"Artist\"]\n found_albums = {}\n start = 0\n try:\n for index in range(len(albums)):\n if 48 <= ord(str(dates[index])[0]) <= 57:\n continue\n if str(albums[index]).lower() not in found_albums:\n webD.get(\n 'https://www.discogs.com/search/?q=' + str(albums[\n index]).split('(')[0].replace(' ', '+') + '+' + str(\n artists[index]).replace(' ', '+') +\n '&type=release&layout=sm')\n if start == 0:\n time.sleep(2)\n webD.find_element_by_xpath(\n '/html/body/div[5]/div[2]/div/div[1]/div/div[2]/div/button[2]').click()\n time.sleep(2)\n webD.find_element_by_xpath(\n '/html/body/div[5]/div[3]/div[3]/div[1]/button').click()\n start = 1\n try:\n card = webD.find_element_by_class_name('card_body')\n title = card.find_element_by_tag_name('h4').text.lower()\n if str(albums[index]).lower().split(' (')[0] not in title:\n if str(artists[index]).lower().split(' (')[0] not in \\\n title:\n continue\n year = card.find_element_by_class_name(\n 'card_release_year').text[-4:]\n if int(year) < 1980:\n year = None\n except ElementClickInterceptedException:\n year = None\n except NoSuchElementException:\n year = None\n found_albums[str(albums[index]).lower()] = year\n dates[index] = found_albums[str(albums[index]).lower()]\n print(str(dates[index])[0])\n songs = songs.assign(Date=dates)\n songs.to_json(filename, orient='table', indent=4)\n except WebDriverException:\n songs = songs.assign(Date=dates)\n songs.to_json(filename, orient='table', indent=4)", "def get_calendar_list(username, widget):\n # Get google credentials\n credentials = get_google_creds(username, widget)\n \n # Check the oauth creds and refresh if necessary\n credentials = validate_and_refresh_creds(credentials)\n\n #gcal_logger.info(\"Validated credentials: {0}\".format(credentials))\n if credentials.get(\"redirect\", None):\n return credentials\n\n # Set appropriate credentials\n set_google_creds(username, credentials, widget)\n\n # Get gcal service\n service = get_gcal_service_from_credentials(credentials)\n\n calendar_list = get_gcal_from_service(service)\n # Get google calendar list\n\n # TODO(arthurb): Change into a cool one-liner; maybe use lambda calc\n # Extract relevant info from google calendar list\n calendars = []\n for item in calendar_list:\n calendars.append({\"name\": item[\"summary\"], \"id\": item[\"id\"]})\n\n return {\"calendar_list\": calendars}", "async def get_hanukkah_dates(self) -> List[str]:\n hanukkah_dates = []\n async with self.bot.http_session.get(self.url) as response:\n 
json_data = await response.json()\n festivals = json_data['items']\n for festival in festivals:\n if festival['title'].startswith('Chanukah'):\n date = festival['date']\n hanukkah_dates.append(date)\n return hanukkah_dates", "def getWeeklyAlbumChart(self, user=None, _from=None, to=None):\n pass", "def get_map_location_dates(username):\n date_list = []\n with sql.connect(database_locations) as cur:\n res = cur.execute(f\"\"\"\n SELECT DISTINCT date \n From Location \n WHERE tid='{username}'\n ORDER BY tst DESC;\n \"\"\")\n for _date, in res:\n date_list.append(_date)\n return date_list", "def getDatesList():\n lstDates = []\n curD.execute(\"SELECT Date FROM DataDates ORDER BY Date;\")\n recs = curD.fetchall()\n for rec in recs:\n lstDates.append(rec[\"Date\"])\n return lstDates", "def _getPeriodUsers(self, start_date, final_date):\n self._logger.info(\"Getting users from \" + start_date +\n \" to \" + final_date)\n\n url = self._getURL(1, start_date, final_date)\n data = self._readAPI(url)\n users = []\n\n total_pages = 10000\n page = 1\n\n while total_pages >= page:\n url = self._getURL(page, start_date, final_date)\n data = self._readAPI(url)\n\n for u in data['items']:\n users.append(u[\"login\"])\n self._names.put(u[\"login\"])\n total_count = data[\"total_count\"]\n total_pages = int(total_count / 100) + 1\n page += 1\n return users", "def all_available_dates(reference_stock=\"ANZ\"):\n # use reference_stock to quickly search the db by limiting the stocks searched\n dates = Quotation.objects.mongo_distinct(\n \"fetch_date\", {\"asx_code\": reference_stock}\n )\n ret = sorted(dates, key=lambda k: datetime.strptime(k, \"%Y-%m-%d\"))\n return ret", "def getEvents(self, user=None):\n pass", "def get_dates(url, start_year, end_year):\n # all URLs of `url`\n dates = []\n\n try:\n for year in range(start_year, end_year + 1):\n # domain name of the URL without the protocol\n # print(\"url \", url)\n content = url + str(year) + \"/contents.html\"\n # print(\"content \",content)\n days = get_href(content, \"contents.html\")\n # print(\"days \",days)\n for day in days:\n dates.append(day)\n except Exception as e:\n raise e\n\n return dates", "def get_daily_loads(self, start_date, end_date=None):\n if not end_date:\n return self.dlo_container[str(start_date)]\n else:\n all_dates = list(self.dlo_container.keys())\n i_start = all_dates.index(str(start_date))\n i_end = all_dates.index(str(end_date))\n selected_dates = all_dates[i_start:i_end + 1]\n return (self.dlo_container[date] for date in selected_dates)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function gets all album dates of a user
def db_annotater_get_latest_user_albums(album_date):
    start_at = album_date['start_at']
    end_at = album_date['end_at']
    (hours, mins, secs) = get_time_diff_h_m_s(start_at, end_at)
    wear_time = [{"hours": str(hours), "minutes": str(mins)}]
    album_id = album_date['id']
    if album_date['annotation'] is True:
        submitted = "Yes"
    else:
        submitted = "No"
    capture_date = get_date_dash_d_m_y(album_date['capture_date'])
    # get images
    images = db_annotater_get_album_images(album_id)
    one_album = {"wearTime": wear_time,
                 "submitted": submitted,
                 "date": capture_date,
                 "images": images}
    return [one_album]
[ "def db_annotater_get_user_album_dates(albums_queryset):\n\n\t# analyse the queryset of all albums of a user\n\tlatest_date \t= \"\"#datetime.now().date()\n\tsubmit_dates\t= []\n\tunsubmit_dates\t= []\n\tlatest_album\t= None \n\tfor album_date in albums_queryset:\n\t\tif album_date['annotation'] is True:\n\t\t\tnew_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t\t\tsubmit_dates.append(new_date)\n\t\telse:\n\t\t\tnew_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t\t\tunsubmit_dates.append(new_date)\n\tif len(albums_queryset) > 0:\n\t\tlatest_album= albums_queryset.reverse()[0]\n\t\tlatest_date = latest_album['capture_date']\n\t\tlatest_date = get_date_dash_d_m_y(latest_date)\n\t\tlatest_album_id = latest_album['id']\n\talbum_dates = {'ld':latest_date,'s':submit_dates,'u':unsubmit_dates} \n\treturn (latest_album,album_dates)", "def get_dates(decks: QuerySet) -> List[date]:\n return list(decks.values_list(\"date_created\", flat=True).distinct())", "def dates(self):\n drs = self._data_record_class.objects.filter(**self._kwargs()).values('date').distinct()\n return [d['date'] for d in drs]", "def getTopAlbums(self, user=None, period=\"overall\"):\n pass", "def vk_get_album_list(request):\n if not request.user.is_superuser:\n return redirect('%s?next=%s' % (reverse('dc_parse:admin_auth'), request.path))\n vk_token,vk_user = get_vk_cookies(request)\n method_name = 'photos.getAlbums'\n parameters = {\n 'owner_id': vk_user,\n 'need_covers': 1,\n 'need_system': 1,\n }\n content = vk_method(method_name,vk_token,parameters)\n\n albums = content['items']\n for album in albums:\n album['created'] = psql_time(album.get('created')) if isinstance(album.get('created'),int) else None\n album['updated'] = psql_time(album.get('updated')) if isinstance(album.get('updated'),int) else None\n\n return render(request,'vk_get_album_list.html',{\n # 'content': content,\n 'albums': content['items'],\n # 'album': album,\n # 'tags': tags,\n # 'resume': resume\n })", "def get_album_photos(self, user_id, photoset_id, page=1):\n\n resp = requests.get(Flickr.REST_BASE_URL, params={\n \"method\": \"flickr.photosets.getPhotos\",\n \"api_key\": self.__apikey,\n \"user_id\": user_id,\n \"photoset_id\": photoset_id,\n \"format\": \"json\",\n \"nojsoncallback\": 1,\n \"extras\": \" url_sq,url_t,url_s,url_q,url_m,url_n,url_z,url_c,url_l,url_o,description,tags,owner_name,license\",\n \"per_page\": self.page_size,\n \"page\": page\n })\n\n if resp.status_code != 200:\n raise Exception(\"Error fetching Flickr album photo list. Status code: %s\"%(resp.status_code))\n\n ps = resp.json()\n\n if ps[\"stat\"] != \"ok\":\n raise Exception(\"Error fetching Flickr album photo list. 
Reason: %s\"%ps[\"message\"])\n\n return ps", "def get(self): \n return getAllAlbums()", "def make_timeline_data(self,user):\n annos = json.loads(self.user_annos.get(user))\n dates = [a['updated'] for a in annos]\n dates = [parser.parse(date) for date in dates]\n dates.sort()\n dates = dates\n \n first = dates[0]\n last = dates[-1]\n \n def perdelta(start, end, delta):\n curr = start\n while curr < end:\n yield curr.strftime('%Y-%m-%d')\n curr += delta\n \n day_dict = defaultdict(int)\n for date in dates:\n day = date.strftime('%Y-%m-%d')\n day_dict[day] += 1\n \n for day in perdelta(first, last, timedelta(days=1)):\n if day_dict.has_key(day) == False:\n day_dict[day] = 0\n \n days = day_dict.keys()\n days.sort()\n counts = [day_dict[day] for day in days]\n return counts, days", "def get_dates(filename):\n try:\n #TODO: we changed the file to part 1 just to delete old files!\n songs = pd.read_json(filename, orient='table')\n except AssertionError as err:\n raise err\n dates = list(songs['Date'])\n albums = songs[\"Album\"]\n artists = songs[\"Artist\"]\n found_albums = {}\n start = 0\n try:\n for index in range(len(albums)):\n if 48 <= ord(str(dates[index])[0]) <= 57:\n continue\n if str(albums[index]).lower() not in found_albums:\n webD.get(\n 'https://www.discogs.com/search/?q=' + str(albums[\n index]).split('(')[0].replace(' ', '+') + '+' + str(\n artists[index]).replace(' ', '+') +\n '&type=release&layout=sm')\n if start == 0:\n time.sleep(2)\n webD.find_element_by_xpath(\n '/html/body/div[5]/div[2]/div/div[1]/div/div[2]/div/button[2]').click()\n time.sleep(2)\n webD.find_element_by_xpath(\n '/html/body/div[5]/div[3]/div[3]/div[1]/button').click()\n start = 1\n try:\n card = webD.find_element_by_class_name('card_body')\n title = card.find_element_by_tag_name('h4').text.lower()\n if str(albums[index]).lower().split(' (')[0] not in title:\n if str(artists[index]).lower().split(' (')[0] not in \\\n title:\n continue\n year = card.find_element_by_class_name(\n 'card_release_year').text[-4:]\n if int(year) < 1980:\n year = None\n except ElementClickInterceptedException:\n year = None\n except NoSuchElementException:\n year = None\n found_albums[str(albums[index]).lower()] = year\n dates[index] = found_albums[str(albums[index]).lower()]\n print(str(dates[index])[0])\n songs = songs.assign(Date=dates)\n songs.to_json(filename, orient='table', indent=4)\n except WebDriverException:\n songs = songs.assign(Date=dates)\n songs.to_json(filename, orient='table', indent=4)", "def get_calendar_list(username, widget):\n # Get google credentials\n credentials = get_google_creds(username, widget)\n \n # Check the oauth creds and refresh if necessary\n credentials = validate_and_refresh_creds(credentials)\n\n #gcal_logger.info(\"Validated credentials: {0}\".format(credentials))\n if credentials.get(\"redirect\", None):\n return credentials\n\n # Set appropriate credentials\n set_google_creds(username, credentials, widget)\n\n # Get gcal service\n service = get_gcal_service_from_credentials(credentials)\n\n calendar_list = get_gcal_from_service(service)\n # Get google calendar list\n\n # TODO(arthurb): Change into a cool one-liner; maybe use lambda calc\n # Extract relevant info from google calendar list\n calendars = []\n for item in calendar_list:\n calendars.append({\"name\": item[\"summary\"], \"id\": item[\"id\"]})\n\n return {\"calendar_list\": calendars}", "async def get_hanukkah_dates(self) -> List[str]:\n hanukkah_dates = []\n async with self.bot.http_session.get(self.url) as response:\n 
json_data = await response.json()\n festivals = json_data['items']\n for festival in festivals:\n if festival['title'].startswith('Chanukah'):\n date = festival['date']\n hanukkah_dates.append(date)\n return hanukkah_dates", "def getWeeklyAlbumChart(self, user=None, _from=None, to=None):\n pass", "def get_map_location_dates(username):\n date_list = []\n with sql.connect(database_locations) as cur:\n res = cur.execute(f\"\"\"\n SELECT DISTINCT date \n From Location \n WHERE tid='{username}'\n ORDER BY tst DESC;\n \"\"\")\n for _date, in res:\n date_list.append(_date)\n return date_list", "def getDatesList():\n lstDates = []\n curD.execute(\"SELECT Date FROM DataDates ORDER BY Date;\")\n recs = curD.fetchall()\n for rec in recs:\n lstDates.append(rec[\"Date\"])\n return lstDates", "def _getPeriodUsers(self, start_date, final_date):\n self._logger.info(\"Getting users from \" + start_date +\n \" to \" + final_date)\n\n url = self._getURL(1, start_date, final_date)\n data = self._readAPI(url)\n users = []\n\n total_pages = 10000\n page = 1\n\n while total_pages >= page:\n url = self._getURL(page, start_date, final_date)\n data = self._readAPI(url)\n\n for u in data['items']:\n users.append(u[\"login\"])\n self._names.put(u[\"login\"])\n total_count = data[\"total_count\"]\n total_pages = int(total_count / 100) + 1\n page += 1\n return users", "def all_available_dates(reference_stock=\"ANZ\"):\n # use reference_stock to quickly search the db by limiting the stocks searched\n dates = Quotation.objects.mongo_distinct(\n \"fetch_date\", {\"asx_code\": reference_stock}\n )\n ret = sorted(dates, key=lambda k: datetime.strptime(k, \"%Y-%m-%d\"))\n return ret", "def getEvents(self, user=None):\n pass", "def get_dates(url, start_year, end_year):\n # all URLs of `url`\n dates = []\n\n try:\n for year in range(start_year, end_year + 1):\n # domain name of the URL without the protocol\n # print(\"url \", url)\n content = url + str(year) + \"/contents.html\"\n # print(\"content \",content)\n days = get_href(content, \"contents.html\")\n # print(\"days \",days)\n for day in days:\n dates.append(day)\n except Exception as e:\n raise e\n\n return dates", "def get_daily_loads(self, start_date, end_date=None):\n if not end_date:\n return self.dlo_container[str(start_date)]\n else:\n all_dates = list(self.dlo_container.keys())\n i_start = all_dates.index(str(start_date))\n i_end = all_dates.index(str(end_date))\n selected_dates = all_dates[i_start:i_end + 1]\n return (self.dlo_container[date] for date in selected_dates)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function gets all annotation terms from the database and returns them to the interface
def db_get_annotation_terms(uid):
    terms = {}
    terms_queryset = AnnotationTerm.objects.filter(
        Q(private=False) | Q(user=uid)).values('concept', 'category')
    # all public terms
    for term_attr in terms_queryset:
        # get attributes
        category = str(term_attr['category']).strip()
        concept = str(term_attr['concept']).strip()
        if category in terms:
            terms_list = terms[category]  # this is a reference, not a copy
            terms_list.append(concept)
        else:
            terms[category] = [concept]
    return terms
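The loop above simply builds a category-to-concepts index. A tiny self-contained illustration of the same grouping, using invented rows in place of the queryset:

rows = [
    {"category": "food", "concept": "coffee"},
    {"category": "food", "concept": "bread"},
    {"category": "activity", "concept": "reading"},
]

terms = {}
for row in rows:
    # setdefault gives the same category -> list-of-concepts shape as the branch above.
    terms.setdefault(row["category"].strip(), []).append(row["concept"].strip())

assert terms == {"food": ["coffee", "bread"], "activity": ["reading"]}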
[ "def get_terms(self):\n return Term.objects.filter(projects=self) # get a queryset of all terms for a project\n # [term.get_mapping(self.appname) for term in project_terms]", "def annotate_terms(text_file, output_file):\n init_data = read_data('lexicon.tsv')\n data = select_data(init_data)\n text_dataframe = lemma_posttag(text_file)\n annotate(data, text_dataframe)\n annotation = construct_annotated_text(text_dataframe)\n with open(output_file, 'w') as f:\n f.write(annotation)\n print(\"Your file has been annotated.\")", "def get_gilda_terms(prefix: str, url: Optional[str] = None) -> Iterable[gilda.term.Term]:\n id_to_name = get_id_name_mapping(prefix, url=url)\n for identifier, name in tqdm(id_to_name.items(), desc='mapping names'):\n yield gilda.term.Term(\n norm_text=normalize(name),\n text=name,\n db=prefix,\n id=identifier,\n entry_name=name,\n status='name',\n source=prefix,\n )\n\n id_to_synonyms = get_id_synonyms_mapping(prefix, url=url)\n for identifier, synonyms in tqdm(id_to_synonyms.items(), desc='mapping synonyms'):\n name = id_to_name[identifier]\n for synonym in synonyms:\n yield gilda.term.Term(\n norm_text=normalize(synonym),\n text=synonym,\n db=prefix,\n id=identifier,\n entry_name=name,\n status='synonym',\n source=prefix,\n )", "def idxterms(self):\n try:\n terms = self._json.get(\"idxterms\", {}).get('mainterm', [])\n except AttributeError: # idxterms is empty\n return None\n if not isinstance(terms, list):\n terms = [terms]\n try:\n return [d['$'] for d in terms]\n except AttributeError:\n return None", "def _create_annotated_docs(self, filter_terms: Optional[Set[str]] = None) -> List[Doc]:\n raise NotImplementedError", "def get_terms(self):\n \n return self.overall_terms", "def annotate_all(self):\n logger.info(\"Annotating data\")\n self.genomic_df = self.genomic_df.merge(\n self.annotation_df, how=\"left\", on=[\"IDENTIFIER\"]\n )\n self.genomic_df = self._string_split(self.genomic_df, \"GENE\", \",\")\n self.annotate = True", "def getAvailableTerms():\n # type: () -> List[String]\n return [\"term1\", \"term2\"]", "def __call__(self, queries):\n\n # Parse queries and extract keyword terms for each query\n if self.database:\n terms = []\n for query in queries:\n # Parse query\n parse = self.database.parse(query)\n\n # Join terms from similar clauses\n terms.append(\" \".join(\" \".join(s) for s in parse[\"similar\"]))\n\n return terms\n\n # Return original query when database is None\n return queries", "def get_all_phyla():\n return models.Taxonomy.objects.filter(rank='phylum').order_by('name')", "def getLicenseAllAbbr():\n entries = license_description.objects.values(\"abbreviation\")\n return_list = []\n for entry in entries:\n return_list.append(entry[\"abbreviation\"])\n \n return return_list", "def getListofTag(dataset,nlp):\n \n# nlp=spacy.load('fr_core_news_sm') #Load the pre-existed french model of spacy\n data={\"Name\":dataset[\"Name\"],\"Tag\":[]}\n tags=[]\n for text in dataset[\"Text\"]:\n tags.append(ListofTagInsidedoc(text))\n data[\"Tag\"]=tags\n return pd.DataFrame(data)", "def get_annotations(self):\n\n variants = self.ids()\n variants = np.array([var.replace(':', '-').replace('/', '-') for var in variants], dtype='object')\n\n url = 'https://api.missionbio.io/annotations/v1/variants?ids=' + ','.join(variants.astype(str))\n r = requests.get(url=url)\n vars = r.text.split('chromosome')[1:]\n genes = deepcopy(variants)\n\n for ii in range(len(vars)):\n\n vals = vars[ii].split('\"')\n p = np.array(np.where(np.isin(vals, ['Protein'])))[0]\n g = 
np.array(np.where(np.isin(vals, ['Gene'])))[0]\n if len(g) == 0:\n continue\n\n prot = vals[p[0] + 4]\n gene = vals[g[0] + 4]\n\n patho = vars[ii].find('Pathogenic') != -1\n lpatho = vars[ii].find('Likely') != -1\n missense = vars[ii].find('missense') != -1\n nonsense = vars[ii].find('nonsense') != -1\n\n variants[ii] = ('(PATH) ' if patho else '') + \\\n ('(L.PATH) ' if (lpatho & (not patho)) else '') + \\\n ('(MISS) ' if (missense & (not patho) & (not lpatho)) else '') + \\\n ('(NONS) ' if (nonsense & (not patho) & (not lpatho)) else '') + \\\n (gene if (len(prot) == 0) & (len(gene) > 0) else '') + \\\n (prot) + \\\n (' - ' if len(gene) > 0 else '') + variants[ii]\n\n genes[ii] = gene if len(gene) else variants[ii]\n\n return variants", "def get_terms(self) -> set:\n return self.dictionary.words", "def _get_terms(self, context=None):\n from hexagonit.portletstyle.vocabulary import StylesVocabulary\n return list(StylesVocabulary()(context))", "def annotate(terms_dataframe, text_dataframe):\n rules(terms_dataframe, text_dataframe) # apply rules\n for i, token in enumerate(text_dataframe['lemma']):\n for term in terms_dataframe['lemma']:\n term = term.split(' ')\n # Case 1: if terms of length 4, we check if each word from text corresponds to each word in the term\n if len(term) == 4:\n term_1 = term[0]\n if token == term_1 and len(text_dataframe['lemma']) >= i + 4:\n if text_dataframe['lemma'][i + 1] == term[1] and text_dataframe['lemma'][i + 2] == term[2] and \\\n text_dataframe['lemma'][i + 3] == term[3]:\n if text_dataframe['lemma'][i + 4] in rule_4:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 4] = text_dataframe['tokens'][i + 4] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 3] = text_dataframe['tokens'][i + 3] + ']'\n # Case 2: terms of length 3\n elif len(term) == 3:\n term_1 = term[0]\n if token == term_1 and len(text_dataframe['lemma']) > i + 3:\n if text_dataframe['lemma'][i + 1] == term[1] and text_dataframe['lemma'][i + 2] == term[2]:\n if text_dataframe['lemma'][i + 3] in rule_4:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 3] = text_dataframe['tokens'][i + 3] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n # Case 3: terms of length 2\n elif len(term) == 2:\n if token == term[0] and len(text_dataframe['lemma']) > i + 2:\n if text_dataframe['lemma'][i + 1] == term[1]:\n if text_dataframe['lemma'][i + 2] in rule_4:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 1] = text_dataframe['tokens'][i + 1] + ']'\n # Case 4: term of length 1\n elif token == term[0] and i > 1 and text_dataframe['lemma'][i - 1] == 'of' and text_dataframe['lemma'][\n i - 2] == 'sequence':\n text_dataframe['tokens'][i - 2] = '[' + text_dataframe['tokens'][i - 2]\n text_dataframe['tokens'][i] = text_dataframe['tokens'][i] + ']'\n elif token == term[0] and len(term) == 1 and len(text_dataframe['lemma']) >= i + 2 and \\\n text_dataframe['lemma'][i + 1] == ')':\n if text_dataframe['lemma'][i + 2] in rule_4:\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + 
']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i] + ']'\n elif token == term[0] and len(term) == 1 and len(text_dataframe['lemma']) >= i + 1:\n if text_dataframe['lemma'][i + 1] in rule_4:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 1] = text_dataframe['tokens'][i + 1] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i] + ']'\n if i != 0:\n if text_dataframe['lemma'][i - 1] in rule_adj and '[' in text_dataframe['tokens'][i]:\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1] + ']'\n elif i >= 3 and text_dataframe['lemma'][i - 1] in rule_adj and text_dataframe['lemma'][\n i - 3] == 'non' and '[' in text_dataframe['tokens'][i]:\n text_dataframe['tokens'][i - 3] = '[' + text_dataframe['tokens'][i - 3]\n text_dataframe['tokens'][i - 3] = text_dataframe['tokens'][i - 1] + ']'\n return text_dataframe", "def locations(term):", "def terms(self):\n return self._offr['terms'].keys()", "def get_all_annotations(organization_dict, token):\n\n headers = {\"Authorization\": \"token {0}\".format(token)}\n response = requests.get(\"https://api.elis.rossum.ai/v1/annotations?organization={0}\".format(organization_dict[\"id\"]),\n headers=headers)\n\n if response.status_code == 200:\n print(\"Fetching annotations - OK\")\n else:\n print(\"Fetching annotations - ERROR\")\n\n return response.json()[\"results\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function deletes one image from the database by marking visible = False
def db_delete_one_image(imgId):
    print "delete one image from database: " + str(imgId)
    image = Picture.objects.get(pk=imgId)
    image.visible = False
    image.save()
[ "def delete(self, *args, **kwargs):\n self.image.delete()\n super(StoredImage, self).delete(*args, **kwargs)", "def delete_image(sender, instance, **kwargs):\n if os.path.exists(instance.image.path):\n os.remove(instance.image.path)", "def delete_image(self, http_request, image_id):\n image = self.image_by_id(image_id)\n if image:\n self.glance_admin_image_store.remove(image)\n http_request.setResponseCode(204)\n return b''\n http_request.setResponseCode(404)\n return b''", "def img_delete_by_id(self, img_id: int) -> None:\n img = self.img_by_id(img_id)\n if img:\n self.__session.delete(img)\n self.commit()\n else:\n print('No such image')", "def img_delete_by_path(self, img_path: str) -> None:\n img = self.img_by_path(img_path)\n if img:\n self.__session.delete(img)\n self.commit()\n else:\n print('No such image')", "def del_image(request):\n if not request.user.is_authenticated():\n return HttpResponse(-1)\n img_name = request.POST.get(\"img_name\", \"\")\n if img_name == \"\":\n return HttpResponse(-2)\n file = settings.MEDIA_ROOT + \"/upload/\" + img_name\n if os.path.exists(file):\n os.remove(file)\n return HttpResponse(0)\n return HttpResponse(-3)", "def delete_image_member(self, img, project_id):\r\n return img.delete_member(project_id)", "def delete_image(self, index):\n if isinstance(index, int) == False or index > self.maximum_image_count:\n raise Exception(\n \"Index for deletion should be smaller integer than maximum_image_count\")\n # Delete the image from the image list by\n # poping the entry out of the dictionary!\n self.image_list.pop(index, None)", "def remove_answer_image(answer_id):\n SQL = \"\"\"UPDATE answer SET image = NULL WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = None\n db.run_statements(((SQL, data, fetch),))", "def gameDeleteHandler(sender, instance, **kwargs):\n instance.gameimage.delete(save=False)", "def test_delete_image(self):\r\n image = Image.objects.create(\r\n archive=self.archive,\r\n name='image'\r\n )\r\n\r\n response = self.client.delete(image_detail_url(image.id))\r\n exists = Image.objects.filter(name='image').exists()\r\n\r\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\r\n self.assertFalse(exists)", "def screenshot_deleted(req, screenshot):", "def delete_image_tag(self, img, tag):\r\n return img.delete_tag(tag)", "def delete_image(self, node_image):\r\n\r\n raise NotImplementedError(\r\n 'delete_image not implemented for this driver')", "def del_pic( request ):\n result = {}\n try:\n picid = request.POST['picid']\n pic_instance = Pic.objects.get(pk = picid)\n pic_url = os.path.join(settings.MEDIA_ROOT, pic_instance.link.name)\n \n #delete picture file\n if os.path.isfile(pic_url):\n os.remove(pic_url)\n \n pic_instance.delete()\n result['status'] = 'OK'\n result['msg'] = 'OK'\n \n except Pic.DoesNotExist:\n print '[Error] can not find the picture', picid\n result['status'] = 'ERROR'\n result['msg'] = 'can not find the picture'\n\n return HttpResponse(json.dumps(result), content_type='application/json')", "def delete_a_image(answer_id):\n current_image = get_answer_image(answer_id)\n if current_image:\n remove_answer_image(answer_id)\n try:\n os.remove(\"static/uploads/\" + current_image)\n except FileNotFoundError:\n pass", "def delete_image(self, offset, total):\n idx = self._idx + offset\n try:\n obj = self.__getitem__(idx)\n except IndexError:\n return None\n\n self._backup.append((idx, obj))\n\n del self._filenames[idx]\n obj.delete()\n\n if self._idx > 0 and total / 2 > offset:\n self._idx -= 1\n 
self._load(self._idx - self.PRELOAD_RANGE)\n else:\n self._load(self._idx + self.PRELOAD_RANGE + 1)\n\n return obj", "def _delete_image(self, context, image_id, image_service) -> None:\n try:\n image_meta = image_service.show(context, image_id)\n image_status = image_meta.get('status')\n if image_status == 'queued' or image_status == 'saving':\n LOG.warning(\"Deleting image in unexpected status: \"\n \"%(image_status)s.\",\n {'image_status': image_status},\n resource={'type': 'image', 'id': image_id})\n image_service.delete(context, image_id)\n except Exception:\n LOG.warning(\"Image delete encountered an error.\",\n exc_info=True, resource={'type': 'image',\n 'id': image_id})", "def delete_images_on_board(self, board_id: str):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function returns the list of all annotatees for user uid
def db_annotater_get_user_annotatees(uid):
    annotatees = AnnotationTask.objects.filter(
        annotator_id=uid).values('subject', 'no_album', 'finished')
    return annotatees
[ "def get_annotations_for_user_id(annotations, user_id):\n rows = annotations[\"rows\"]\n return [r for r in rows if r[\"user\"][\"id\"] == user_id]", "def get_annotations_keyed_by_user_id(annotations):\n rows = annotations[\"rows\"]\n annotations_by_user = {}\n for r in rows:\n user_id = r[\"user\"][\"id\"]\n annotations_by_user.setdefault(user_id, []).append(r)\n return annotations_by_user", "def get_annotated_results(user, result_set):\n\n # Get all the ids of items we've found\n opp_ids = [result.item_id for result in result_set.results]\n\n # mark the items the user is interested in\n (user_interests, ordered_event_ids) = get_user_interests(user, True)\n\n # note the interest of others\n others_interests = get_interest_for_opportunities(opp_ids)\n\n return annotate_results(user_interests, others_interests, result_set)", "def get_queryset(self):\n\n return Annotation.objects.filter(owner__user__username=self.kwargs['username'],\n location_name__icontains=self.kwargs['keyword'],\n owner__isdeleted=False,\n owner__user_privacy=False)", "def db_get_annotation_terms(uid):\n\tterms = {}\n\tterms_queryset = AnnotationTerm.objects.filter(Q(private=False) | Q(user=uid)).values('concept', 'category')\n\t# all public terms\n\tfor term_attr in terms_queryset:\n\t\t# get attributes\n\t\tcategory \t= str(term_attr['category']).strip()\n\t\tconcept\t\t= str(term_attr['concept']).strip()\n\t\tif category in terms:\n\t\t\tterms_list = terms[category] # here is the refer, not a copy\n\t\t\tterms_list.append(concept)\n\t\telse:\n\t\t\tterms[category] = [concept]\n\treturn terms", "def get_annot_notes(ibs, aid_list):\n annotation_notes_list = ibs.db.get(ANNOTATION_TABLE, ('annot_note',), aid_list)\n return annotation_notes_list", "def _get_all_aids(ibs):\n all_aids = ibs.db.get_all_rowids(ANNOTATION_TABLE)\n return all_aids", "def getNewsFeed(self, userId):\n lst = self.followees[userId]\n lst.add(userId)\n allTweets=[]\n for fellow in lst:\n for x in self.tweets[fellow]:\n allTweets.append(x)\n allTweets.sort(key=lambda x:x[1],reverse=True)\n # print(allTweets)\n return [x[0] for x in allTweets[:10]]", "def fetch_annotations_by_course(context_id, user_id):\n annotation_db_credentials = get_annotation_db_credentials_by_course(context_id)\n\n results = {\"rows\": [], \"totalCount\": 0}\n for credential in annotation_db_credentials:\n db_url = credential[\"annotation_database_url\"].strip()\n db_apikey = credential[\"annotation_database_apikey\"]\n db_secret = credential[\"annotation_database_secret_token\"]\n annotator_auth_token = retrieve_token(user_id, db_apikey, db_secret)\n logger.debug(\n \"Fetching annotations with context_id=%s database_url=%s\"\n % (context_id, db_url)\n )\n data = _fetch_annotations_by_course(context_id, db_url, annotator_auth_token)\n # logger.debug(\"Annotations fetched: %s\" % data)\n if \"rows\" in data:\n results[\"rows\"] += data[\"rows\"]\n if \"totalCount\" in data:\n results[\"totalCount\"] += int(data[\"totalCount\"])\n return results", "def get_distinct_users_from_annotations(annotations, sort_key=None):\n\n def _default_sort_key(user):\n return user[\"id\"]\n\n rows = annotations[\"rows\"]\n annotations_by_user = {}\n for r in rows:\n user_id = r[\"user\"][\"id\"]\n if user_id not in annotations_by_user:\n annotations_by_user[user_id] = r[\"user\"]\n users = list(\n sorted(\n annotations_by_user.values(),\n key=sort_key if sort_key else _default_sort_key,\n )\n )\n return users", "def find_info(data):\n lst = []\n for line in data['users']:\n var_l = 
[line['screen_name'], line['name'], line['location']]\n lst.append(var_l)\n return lst", "def get_annotations_and_ids(self):\n return self.annotations.copy(), self.annotated_img_ids.copy()", "def get_suggestions(self, user_id, limit):\n suggestions = []\n user_id = unicode(user_id)\n if limit == 0:\n return suggestions\n user_bookmarks = self._mk.Bookmark.find(\n {'owner':user_id})\n user_bookmarks = [u_b for u_b in user_bookmarks]\n user_urls = [u_b.url for u_b in user_bookmarks]\n for bookmark in user_bookmarks:\n same_urls = self._mk.Bookmark.find(\n {'url': bookmark.url })\n for same_url in same_urls:\n if same_url.owner == user_id:\n continue\n for circle_id in same_url.circles:\n circle = self.get_circle(circle_id)\n for suggested_id in circle.bookmarks:\n suggestion = self.get_bookmark(suggested_id)\n if suggestion.url in user_urls:\n continue\n if suggestion.url in suggestions:\n continue\n suggestions.append(suggestion.url)\n if len(suggestions) >= limit:\n return suggestions\n return suggestions", "def annotation_tuples_from_file(self, document):\n annotations = []\n f = file(self.mpqa_root + document, 'r')\n tmp = f.read()\n f.close()\n for tuple in self.getmpqatuples(document, 'annotations'):\n annotations.append(tuple)\n #print annotations\n annotations.sort(key=lambda x: (x[1].start))\n #print annotations\n return annotations", "def get_queryset(self):\n\n return Annotation.objects.filter(owner__user_community=self.kwargs['community'],\n owner__isdeleted=False,)", "def get_annotations(self, analyses):\n raise NotImplementedError(\"Getting annotations is not yet supported.\")", "def get_reviewers(self):\n match = reviewer_regex.match(self.body)\n if not match:\n return []\n return [x.strip('@ ') for x in match.group(1).split(',')]", "def load_annotations(self):\n data_infos = []\n gt_paths = self.scan_folder(self.gt_folder)\n for gt_path in gt_paths:\n data_infos.append(dict(gt_path=gt_path))\n return data_infos", "def get_image_aids(ibs, gid_list):\n # print('gid_list = %r' % (gid_list,))\n # FIXME: MAKE SQL-METHOD FOR NON-ROWID GETTERS\n colnames = ('annot_rowid',)\n aids_list = ibs.db.get(ANNOTATION_TABLE, colnames, gid_list,\n id_colname='image_rowid', unpack_scalars=False)\n #print('aids_list = %r' % (aids_list,))\n return aids_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function saves all user annotations from the interface into the database
def db_annotater_insert_user_annotation(uid, image_ids, annotation_terms):
    try:
        for iid in image_ids:
            for term in annotation_terms:
                aid = AnnotationTerm.objects.filter(concept=term)[0].id
                #print aid
                #print "---aid-----"
                annotation_action = AnnotationAction(
                    annotator=User(id=uid),
                    image=Picture(id=iid),
                    concept=AnnotationTerm(id=aid))
                annotation_action.save()
    except ValidationError:
        print "ValidationError"  # to be modified
    return
[ "def save_annotations_to_file():\n sid = request.form['sid']\n onsets = list(map(float, request.form['onsets'].split(',')))\n durations = list(map(float, request.form['durations'].split(',')))\n # TODO: Clean this up for descriptions with commas\n descriptions = request.form['descriptions'].split(',')\n # Find file\n filename = getFilenameBySid(sid)\n # Load as raw\n fif = FIFReader(filename)\n # Set annotations\n fif.set_annotations(onsets, durations, descriptions)\n # Save back to file\n fif.save()\n # Return the saved annotations\n return fif.get_annotations_as_df().to_json()", "def update_users_data():\n for user in User.objects.all():\n username = user.name\n for i, (video_id, valence, arousal, emotion) in enumerate(data_handle.init_valence_arousal_it()):\n print('updating video {}'.format(video_id))\n try:\n Annotation.objects.get(video_id=video_id, annotator=username)\n except Annotation.DoesNotExist:\n annotation = Annotation(video_id=video_id,\n valence=valence,\n arousal=arousal,\n emotion=emotion,\n annotator=username)\n annotation.save()\n return 'Users data successfully updated'", "def post_annotations(self):\n annotations_url = self.url + \"/annotations\"\n requests.post(annotations_url, json=self.annotations, auth=self.auth)", "def update_anno_dicts(self,rows):\n for row in rows:\n raw = HypothesisRawAnnotation(row) \n if raw.user in self.excluded_users:\n continue\n id = raw.id\n user = raw.user\n refs = raw.references\n if self.anno_dict.get(id) == None:\n print 'adding %s to anno_dict' % id \n self.anno_dict.set(id, json.dumps(row))\n print 'incrementing anno count for %s' % user\n self.increment_index(self.user_anno_counts, user)", "def _on_save_clicked(self):\n # Check whether annotations should be overwritten.\n if not overwrite_check_passed(\n file_path=self._save_destination.value, output_type=\"annotation export\"\n ):\n return\n\n annotations = self._last_selected_label_layer.features.loc[\n :, [self._label_column, \"annotations\"]\n ]\n # pylint: disable=C0103\n df = add_annotation_names(\n df=pd.DataFrame(annotations), ClassSelection=self.ClassSelection\n )\n\n df.to_csv(self._save_destination.value)\n napari_info(f\"Annotations were saved at {self._save_destination.value}\")", "def save_all(self):\n\t\tfor tweet in self.list_of_tweets:\n\t\t\tself.__save_tweet(tweet)\n\t\tself.list_of_tweets = []\n\n\t\tlog.info(\"Save all tweets\")\n\n\t\tfor user in self.list_of_users:\n\t\t\tself.__save_user(user)\t\n\t\tself.list_of_users = []\n\n\t\tlog.info(\"Save all users\")", "def saving_only_annotations(path,img ,xmin, xmax, ymin, ymax,name_damage, img_name):\n name = (path + '/'+ name_damage+\"_\"+img_name+ \"adionis_.jpg\")\n annotation = img[ymin:ymax, xmin:xmax]\n cv2.imwrite(name, annotation)\n print(\"saving image\")", "def set_annotations_and_ids(self, annotations, ids):\n self.annotations = annotations\n self.annotated_img_ids = ids", "def add_annotations(self, annotations: Iterable[FeatureStructure]):\n for annotation in annotations:\n self.add_annotation(annotation)", "def annotate_all(self):\n logger.info(\"Annotating data\")\n self.genomic_df = self.genomic_df.merge(\n self.annotation_df, how=\"left\", on=[\"IDENTIFIER\"]\n )\n self.genomic_df = self._string_split(self.genomic_df, \"GENE\", \",\")\n self.annotate = True", "def create_annotations(self) -> None:\n pass", "def save_multiple_user(self):\n self.new_user.save_user()", "def add_annotations(self, annotations):\n\n if not isinstance(annotations, list):\n print('Image.add_annotations expects a 
list, received {}'.format(type(annotations)))\n exit(1)\n\n self.annotations += annotations\n self.is_annotated = True", "def add_users_annotations(self, userid, tag, force=False, schedule_in=None):\n self._db.execute(\n Job.__table__.insert().from_select(\n [Job.name, Job.scheduled_at, Job.priority, Job.tag, Job.kwargs],\n select(\n [\n text(\"'sync_annotation'\"),\n text(f\"'{self._datetime_at(schedule_in)}'\"),\n text(\"100\"),\n text(repr(tag)),\n func.jsonb_build_object(\n \"annotation_id\", Annotation.id, \"force\", force\n ),\n ]\n ).where(Annotation.userid == userid),\n )\n )\n mark_changed(self._db)", "def _save_annotation(annotation, filename):\n pil_image = Image.fromarray(annotation.astype(dtype=np.uint8))\n with tf.gfile.Open(filename, mode='w') as f:\n #NOTE: maybe this \n pil_image.save(f, 'PNG')", "def update_photo_dicts(self,rows):\n for row in rows:\n raw = HypothesisRawAnnotation(row) \n if raw.user in self.excluded_users:\n continue\n if self.user_icons.get(raw.user) is None:\n print 'adding photo for %s' % raw.user\n self.user_icons.set(raw.user, self.get_user_twitter_photo(raw.user))", "def set_annot_thetas(ibs, aid_list, theta_list):\n ibs.delete_annot_chips(aid_list) # Changing theta redefines the chips\n id_iter = ((aid,) for aid in aid_list)\n val_list = ((theta,) for theta in theta_list)\n ibs.db.set(ANNOTATION_TABLE, ('annot_theta',), val_list, id_iter)", "def _write_annotation(filename, annotation):\n _mkdir(os.path.dirname(filename))\n save_pbobject_as_json(annotation, filename)", "def setAnnotation(*args, **kwargs):\n \n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the events manager can find upcoming events
def test_get_future_events(self): upcoming_events = Event.objects.upcoming_events() # There are 2 upcoming events assert len(upcoming_events) == 10 # They should all start with upcoming assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
[ "def test_search_events(self):\n pass", "def testGetEvents(self):\n self.activity.type = \"event\"\n self.activity.depends_on = \"True\"\n self.activity.name = \"name\"\n self.activity.pub_date=datetime.datetime.today()\n self.activity.expire_date=datetime.datetime.today() + datetime.timedelta(days=7)\n self.activity.event_date = datetime.datetime.today()\n \n self.activity.save()\n \n activities = get_available_activities(self.user)\n if self.activity in activities:\n self.fail(\"Event is listed in the activity list.\")\n \n events = get_available_events(self.user)\n \n if self.activity.id != events[0][\"id\"]:\n self.fail(\"Event is not listed in the events list.\")", "def test_live(self):\n user = User.objects.create_user(\n 'foo', 'bar@example.com', 'secret'\n )\n event = create_event(\n start_date=(2014, 5, 1),\n end_date=(2014, 5, 1),\n created_by=user,\n title=\"kowabunga\",\n description=\"Testing 1 2 3\",\n repeat=\"BIWEEKLY\",\n utc=True\n )\n event2 = create_event(\n start_date=(2014, 6, 1),\n end_date=(2014, 6, 1),\n created_by=user,\n title=\"kowabunga\",\n description=\"Testing 1 2 3\",\n repeat=\"WEEKDAY\",\n utc=True\n )\n event3 = create_event(\n start_date=(2014, 5, 2),\n end_date=(2014, 5, 4),\n created_by=user,\n title=\"gnarly\",\n description=\"Testing 1 2 3\",\n repeat=\"NEVER\",\n utc=True\n )\n event4 = create_event(\n start_date=(2014, 4, 2),\n end_date=(2014, 4, 4),\n created_by=user,\n title=\"tubular\",\n description=\"Testing 1 2 3\",\n repeat=\"WEEKLY\",\n end_repeat=date(2014, 5, 2),\n utc=True\n )\n event.save()\n event2.save()\n event3.save()\n event4.save()\n now = make_aware(datetime(2014, 5, 6), utc)\n events = Event.objects.live(now)\n self.assertEqual(len(events), 2)\n self.assertEqual(events[0].title, event.title)\n self.assertEqual(events[0].pk, event.pk)\n self.assertEqual(events[1].title, event2.title)\n self.assertEqual(events[1].pk, event2.pk)", "def test_get_run_events(self):\n pass", "def test_no_events_query_success(self):\n endpoint_url = get_all_events_endpoint_url()\n response = client.get(endpoint_url)\n assert check_get_all_events_response_valid(response, 0)", "def test_get_multi_run_events(self):\n pass", "def test_week_upcoming(self):\n pass", "def test_10_api_can_get_all_free_events(self):\n response = self.app.get('/api/events/free', headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['events']), 0)", "def test_get_all_events(self):\n\n response = client.get(\"/api/event\")\n self.assertEqual(len(response.data), 2)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_event_model(self):\n name = '♪┏(・o・)┛♪┗ ( ・o・) ┓♪'\n url = 'myCoolParty'\n location = 'da street!'\n add_user()\n user_id = User.query.first().id\n availability = create_availability()\n add_event(url=url,\n name=name,\n location=location,\n user_id=user_id,\n availability=availability)\n event = Event.query.filter_by(url=url).first()\n\n self.assertEqual(event.name, name)\n self.assertEqual(event.location, location)", "def test_get_inbox_replier_events(self):\n pass", "def test_no_events(self, db, client):\n response = client.get(reverse(\"events:by-semester\", args=[\"spring\", 2099]))\n assert response.status_code == 404", "def get_upcoming_events(self, number):\n return # osid.calendaring.EventList", "def test_generate_event(self):\n pass", "def testInit(self):\n event_tester = EventTester()\n self.assertEqual(event_tester.events, [])", "def 
test_07_api_can_get_all_events(self):\n response = self.app.get('/api/events', headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['events']), 2)", "def test_upcoming_event_description_parser(self):\n\n future_date = timezone.now() + timedelta(days=2)\n future_date2 = future_date + timedelta(days=3)\n\n # creating event description\n urls_to_parse = ['www.thefirsturl.com', 'http://www.thefirsturl.com', 'https://www.thefirsturl.com', 'www.thefirsturl.com/somepage.html']\n event_description = \" some text \".join(urls_to_parse)\n\n # post call url for event creatoin\n new_event_create_url = testing_urls.create_event_url(self.org_id)\n\n # creating new event with urls in description\n\n new_event = self._post_new_event(\n new_event_create_url,\n event_description,\n self.org_user_1.id,\n future_date2.strftime('%Y-%m-%d'),\n future_date,\n future_date2\n )\n\n new_event_detail_url = testing_urls.event_detail_or_edit_url(new_event.id)\n new_edit_event_url = testing_urls.event_detail_or_edit_url(\n new_event.id,\n edit=True\n )\n\n # asserting parsed urls after event creation\n self._assert_parsed_urls(new_event_detail_url, urls_to_parse)\n\n # asserting parsed urls during event edition:\n self._assert_parsed_urls(new_edit_event_url, urls_to_parse)\n\n # now saving the event after editing\n new_event = self._post_new_event(\n new_edit_event_url,\n event_description,\n self.org_user_1.id,\n future_date2.strftime('%Y-%m-%d'),\n future_date,\n future_date2\n )\n\n # asserting parsed urls after edition:\n self._assert_parsed_urls(new_event_detail_url, urls_to_parse)", "def test_participant_event():\n\n event = events.get(1)\n user = users.get(1)\n\n event.participants.append(user)\n\n assert user in event.participants", "def test_season_upcoming(self):\n pass", "def can_lookup_recurring_events(self):\n return # boolean" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a graph to a dot file.
def to_dot( g, dname="tmp", fname="test", extension=".dot", return_fname=False, ortho=False, fi_labels=False, ): # Layout if fi_labels: for e in g.edges(): g.edges()[e]["label"] = "{0:.2f}".format(g.edges()[e].get("fi", 0)) dot = nx.drawing.nx_pydot.to_pydot(g) dot.set("rankdir", "BT") if ortho: dot.set("splines", "ortho") # To file full_fname = os.path.join(dname, fname + extension) with open(full_fname, "w") as f: print(dot.to_string(), file=f) if return_fname: return full_fname else: return
[ "def write_dot_file(self, out_file_path):\n nx.nx_agraph.write_dot(self, out_file_path)", "def _write_dot(self):\n if self.dot_file:\n write_dot(self.graph, self.dot_file)", "def write_graph(self, filename):\n pass", "def writeDOT(G, filename, directed=False):\n writefile = open(filename, 'wt')\n if directed:\n writefile.write('digraph G {\\n')\n else:\n writefile.write('graph G {\\n')\n name = {}\n nextname = 0\n for v in G.V():\n name[v] = nextname\n nextname += 1\n options = 'penwidth=3,'\n if hasattr(v, 'label'):\n options += 'label=\"' + str(v.label) + '\",'\n if hasattr(v, 'colortext'):\n options += 'color=\"' + v.colortext + '\",'\n elif hasattr(v, 'colornum'):\n options += 'color=' + str(v.colornum % numcolors + 1) + ', colorscheme=' + defaultcolorscheme + ','\n if v.colornum >= numcolors:\n options += 'style=filled,fillcolor=' + str(v.colornum // numcolors + 1) + ','\n if len(options) > 0:\n writefile.write(' ' + str(name[v]) + ' [' + options[:-1] + ']\\n')\n else:\n writefile.write(' ' + str(name[v]) + '\\n')\n writefile.write('\\n')\n\n for e in G.E():\n options = 'penwidth=2,'\n if hasattr(e, 'weight'):\n options += 'label=\"' + str(e.weight) + '\",'\n if hasattr(e, 'colortext'):\n options += 'color=\"' + e.colortext + '\",'\n elif hasattr(e, 'colornum'):\n options += 'color=' + str(e.colornum % numcolors + 1) + ', colorscheme=' + defaultcolorscheme + ','\n if e.colornum >= numcolors:\n options += 'style=filled,fillcolor=' + str(e.colornum // numcolors + 1) + ','\n if len(options) > 0:\n options = ' [' + options[:-1] + ']'\n if directed:\n writefile.write(' ' + str(name[e.tail()]) + ' -> ' + str(name[e.head()]) + options + '\\n')\n else:\n writefile.write(' ' + str(name[e.tail()]) + '--' + str(name[e.head()]) + options + '\\n')\n\n writefile.write('}')\n writefile.close()", "def _write_dot(cls, destination, meta_dependencies, meta_rev_dependencies):\n with open(destination, \"w\") as out:\n out.write(\"digraph G {\\n\")\n out.write(\" graph [ dpi = 75 ];\\n\")\n out.write(\" node [shape=record,width=.1,height=.1];\\n\")\n out.write(\" splines=ortho;\\n\\n\")\n\n for node, dependencies in meta_dependencies.iteritems():\n node_id = \"Node_%i\" % (id(node),)\n node_type = node.__class__.__name__\n if node_type.endswith(\"Node\"):\n node_type = node_type[:-4]\n\n rank = None\n color = \"white\"\n if not meta_dependencies.get(node):\n color = \"red\"\n elif not meta_rev_dependencies.get(node):\n color = \"green\"\n rank = \"sink\"\n\n if rank is not None:\n out.write(\" {\")\n out.write(\" rank = %s;\\n \" % (rank,))\n\n out.write(' %s [label=\"%s\"; fillcolor=%s; style=filled]\\n'\n % (node_id, node_type, color))\n\n if rank is not None:\n out.write(\" }\")\n\n for dependency in dependencies:\n dep_id = \"Node_%i\" % (id(dependency),)\n out.write(\" %s -> %s\\n\" % (dep_id, node_id))\n out.write(\"\\n\")\n\n out.write(\"}\\n\")\n\n return True", "def serialize_dot(gviz: Digraph) -> bytes:\r\n dot = str(gviz)\r\n f = BytesIO()\r\n f.write(dot.encode(constants.DEFAULT_ENCODING))\r\n return f.getvalue()", "def plot_dot_graph(graph, filename=None):\n if not plot.pygraphviz_available:\n logger.error(\"Pygraphviz is not installed, cannot generate graph plot!\")\n return\n if not plot.PIL_available:\n logger.error(\"PIL is not installed, cannot display graph plot!\")\n return\n\n agraph = AGraph(graph)\n agraph.layout(prog='dot')\n if filename is None:\n filename = tempfile.mktemp(suffix=\".png\")\n agraph.draw(filename)\n image = Image.open(filename)\n image.show()", "def 
write_nx_graph(graph, filename):\n fx = open(filename, \"w\")\n fx.write(\"digraph grn\\n{\\n\")\n for edge in graph.edges():\n fx.write(\" %s -> %s [label=%d]\\n\" % edge)\n \n fx.write(\"}\")\n fx.close()", "def from_dot(self, in_file): \n start = time.time()\n g = nx.drawing.nx_pydot.read_dot(in_file)\n end = time.time()\n print(\"Time taken for graph formation\", end - start)\n return g", "def export_to_dot(self):\n return nx_pydot.to_pydot(self).to_string()", "def save_dot(self, output, horizontal=False):\n if horizontal:\n graph = pydot.Dot(\"iupac_tree\", rankdir=\"LR\")\n else:\n graph = pydot.Dot(\"iupac_tree\")\n for node in range(len(self.parse_tree.nodes)):\n graph.add_node(pydot.Node(node, label=self.parse_tree.nodes[node][\"type\"].get_name(full=True)))\n for edge in self.parse_tree.edges():\n graph.add_edge(pydot.Edge(*edge[::-1], label=self.parse_tree.get_edge_data(*edge)[\"type\"]))\n graph.write(output)\n return graph", "def dot_to_png(dot_path=\"tree.dot\", png_path=\"tree.png\"):\n png.create_png(dot_path, png_path)", "def view_dot_graph(graph, filename=None, view=False):\n # Optionally depends on graphviz package\n import graphviz as gv\n\n src = gv.Source(graph)\n if view:\n # Returns the output file path\n return src.render(filename, view=view)\n else:\n # Attempts to show the graph in IPython notebook\n try:\n __IPYTHON__\n except NameError:\n return src\n else:\n import IPython.display as display\n format = 'svg'\n return display.SVG(data=src.pipe(format))", "def _dot_to_dagitty_dag(dot_file_path):\n # dot_graph = pydot.graph_from_dot_data(dot_file_path)\n # dot_string = \"dag {\" + \"\\n\".join([e.to_string() for e in dot_graph[0].get_edges()]) + \"}\"\n dot_graph = pygraphviz.AGraph(dot_file_path)\n dot_string = (\n \"dag {\" + \"\\n\".join([f\"{s1} -> {s2};\" for s1, s2 in dot_graph.edges()]) + \"}\"\n )\n dag_string = dot_string.replace(\"digraph\", \"dag\")\n return dag_string", "def generate_dot_graph(self, file=None):\n lines = []\n lines.append(\"digraph G {\")\n if len(self.provided) > 0:\n lines.append(\" \" + self._generate_bus_node())\n for node in self.nodes.values():\n if node.item_type in self.provided:\n continue\n if not self._is_explicitly_reachable(node):\n continue\n include_rate_in_node_label = \\\n len(self.parent_ptrs[node.item_type._name]) != 1\n lines.append(\" \" + node.get_dot_nodespec(\n include_rate=include_rate_in_node_label))\n for edgespec in node.get_dot_edgespecs(self.provided):\n lines.append(\" \" + edgespec)\n lines.append(\" \" + self._get_legend())\n lines.append(\"}\")\n return \"\\n\".join(lines)", "def graph_to_graphviz(computation_graph):\n dot = graphviz.Digraph(format=\"pdf\")\n dot.node(\"op-root\", shape=\"box\")\n for (i, op) in enumerate(computation_graph.operation):\n if op.HasField(\"task\"):\n dot.node(\"op\" + str(i), shape=\"box\", label=str(i) + \"\\n\" + op.task.name.split(\".\")[-1])\n for res in op.task.result:\n dot.edge(\"op\" + str(i), str(res))\n elif op.HasField(\"put\"):\n dot.node(\"op\" + str(i), shape=\"box\", label=str(i) + \"\\n\" + \"put\")\n dot.edge(\"op\" + str(i), str(op.put.objectid))\n elif op.HasField(\"get\"):\n dot.node(\"op\" + str(i), shape=\"box\", label=str(i) + \"\\n\" + \"get\")\n creator_operationid = op.creator_operationid if op.creator_operationid != 2 ** 64 - 1 else \"-root\"\n dot.edge(\"op\" + str(creator_operationid), \"op\" + str(i), style=\"dotted\", constraint=\"false\")\n for arg in op.task.arg:\n if len(arg.serialized_arg) == 0:\n dot.node(str(arg.objectid))\n 
dot.edge(str(arg.objectid), \"op\" + str(i))\n return dot", "def graph_to_file( g, output_filepath = None ):\n if not output_filepath:\n _outfn = 'output/workflows_output.rdf'\n else: _outfn = output_filepath\n g.serialize( _outfn )\n print(\"Written \"+str(len(g))+\" triples to \" + _outfn)", "def convert_to_dot(self, _with_pruning_ratio=False, show_probabilities=True):\n s = 'digraph DT{\\n'\n s += 'node[fontname=\"Arial\"];\\n'\n s += self.convert_node_to_dot(_with_pruning_ratio=_with_pruning_ratio, show_probabilities=show_probabilities)\n s += '}'\n return s", "def as_dot(self):\r\n node_labels = ''\r\n for (id, node) in self._node_list(0):\r\n node_labels += '{0}[label=\"{1}\"];\\n'.format(id, node)\r\n\r\n (_, tree) = self._as_dot(0)\r\n\r\n return 'graph ast {{\\n{0}\\n{1}\\n}}'.format(node_labels, tree)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loops through a list of sequences and applies the given function to each to get the corresponding tags. Also handles printing output.
def tag_all(sequence_list, tagger, normaliser=None, output_file=None): out = [] start = time.time() total_sents = len(sequence_list) for i, unlabeled_sequence in enumerate(sequence_list, start=1): print "Sentence {0} ({1:2.2f}%)".format(i, float(i)/len(sequence_list) * 100) display = [unlabeled_sequence] t0 = time.time() if normaliser is not None: normalised_seq = normaliser.sentence(unlabeled_sequence) display += [normalised_seq] tags = tagger(normalised_seq) else: tags = tagger(unlabeled_sequence) display += [tags] t1 = time.time() print matrix_to_string(display) print "Time:", '%.3f' % (t1 - t0), ", Per word:", '%.3f' % ((t1 - t0) / len(unlabeled_sequence)) print "Estimated time:", datetime.timedelta(seconds=(t1 - start) / i * (total_sents - i)), "\n" out += [zip(unlabeled_sequence, tags)] return out
[ "def markupSeq(seq, ulPosList, boldPosList, annots = {}):\n annotStarts = {}\n annotEnds = defaultdict(set)\n for (start, end), aDict in annots.iteritems():\n annotStarts[start] = aDict\n aDict[\"end\"] = end\n\n ulStarts = set([x[0] for x in ulPosList])\n ulEnds = set([x[1] for x in ulPosList])\n boldStarts = set([x[0] for x in boldPosList])\n boldEnds = set([x[1] for x in boldPosList])\n ret = []\n openAnnots = defaultdict(int) # current number of open spans, per cssString\n openTags = set()\n for i, nucl in enumerate(seq):\n if i in annotEnds:\n for tagStr in annotEnds[i]:\n if tagStr in openAnnots:\n openAnnots[tagStr]-=1\n if openAnnots[tagStr]==0:\n ret.append(\"</span>\")\n del openAnnots[tagStr]\n\n if i in annotStarts:\n aDict = annotStarts[i]\n cssParts = []\n for key, val in aDict[\"css\"].iteritems():\n cssParts.append(\"%s:%s\" % (key, val))\n cssStr = \";\".join(cssParts)\n tagStr = \"<span style='%s'>\" % cssStr\n if not tagStr in openAnnots:\n ret.append(tagStr)\n openAnnots[tagStr]+=1\n annotEnds[aDict[\"end\"]].add(tagStr)\n\n if i in ulStarts:\n ret.append(\"<u>\")\n openTags.add(\"u\")\n if i in ulEnds:\n ret.append(\"</u>\")\n if \"u\" in openTags:\n openTags.remove(\"u\")\n if i in boldStarts:\n ret.append(\"<b>\")\n openTags.add(\"b\")\n if i in boldEnds:\n ret.append(\"</b>\")\n if \"strong\" in openTags:\n openTags.remove(\"b\")\n ret.append(nucl)\n if (i+1) % 80==0:\n ret.append(\"<br>\")\n for tag in openTags:\n ret.append(\"</%s>\" % tag)\n return \"\".join(ret)\n #return seq[:start]+\"<u>\"+seq[start:end]+\"</u>\"+seq[end:]", "def print_tags(text,tags,indent=0):\n for tag,l,r,subtags in tags:\n tagname = repr(tag)\n if len(tagname) > 20:\n tagname = tagname[:20] + '...'\n target = repr(text[l:r])\n if len(target) > 60:\n target = target[:60] + '...'\n if subtags == None:\n print ' '+indent*' |',tagname,': ',target,(l,r)\n else:\n print ' '+indent*' |',tagname,': ',target,(l,r)\n print_tags(text,subtags,indent+1)", "def tag_to_func(tag_file, list_fun_list, all_tags_flag):\n with open(tag_file) as f:\n if all_tags_flag:\n for line in f:\n elements = line.strip().split('\\t')\n if (len(elements) >= 4):\n elements[2] = elements[2][:-2]\n update_list(list_fun_list, elements[1], elements[2], elements[0])\n else: # Only functions \n for line in f:\n elements = line.strip().split('\\t')\n if (len(elements) >= 4) and (elements[3] == 'f'):\n elements[2] = elements[2][:-2]\n update_list(list_fun_list, elements[1], elements[2], elements[0])", "def tag_word (lx,wd):\n # add code here\n printlist = []\n for nom in function_words_tags:\n if nom[0] == wd:\n add(printlist, nom[1])\n\n if len(printlist) == 0:\n if wd in lx.getAll('P'):\n\t add(printlist, 'P')\n\n\tif wd in lx.getAll('A'):\n\t add(printlist, 'A')\n\n if wd in lx.getAll('N'):\n if wd in unchanging_plurals:\n add(printlist, 'Ns')\n add(printlist, 'Np')\n if noun_stem(wd) is '':\n add(printlist, 'Ns')\n else:\n add(printlist, 'Np')\n\n\telif noun_stem(wd) in lx.getAll('N'):\n if wd in unchanging_plurals:\n add(printlist, 'Ns')\n add(printlist, 'Np')\n if noun_stem(wd) is not '':\n add(printlist, 'Np')\n\t else: \n\t\tadd(printlist, 'Ns')\n\n if wd in lx.getAll('I'):\n if verb_stem(wd) is '':\n add(printlist, 'Ip')\n else:\n add(printlist, 'Is')\n\n\telif verb_stem(wd) in lx.getAll('I'):\n if verb_stem(wd) is '':\n add(printlist, 'Ip')\n else:\n add(printlist, 'Is')\n\n if wd in lx.getAll('T'):\n if verb_stem(wd) is '':\n add(printlist, 'Tp')\n else:\n add(printlist, 'Ts')\n\n elif verb_stem(wd) in 
lx.getAll('T'):\n if verb_stem(wd) is '':\n add(printlist, 'Tp')\n else:\n add(printlist, 'Ts')\n\n return printlist\n else:\n return printlist", "def pos_tag_lst(self, lst, output):\n # assumption: every element in the list is a new sentence\n for e in lst:\n for w in self.tagger.tag(word_tokenize(e.strip())):\n output.write(w[0])\n output.write(\"\\t\")\n output.write(w[1])\n output.write(\"\\n\")\n output.write(\"\\n\")", "def report(*sequences):\n for sequence in sequences:\n print(type(sequence).__name__, '--- \\t' + str(sequence))", "def collect(sequence, function):\n for seq in __builtin__.map(function, sequence):\n for x in seq:\n yield x", "def walk(self, callback):\n taglist = sorted(self.keys())\n for tag in taglist:\n data_element = self[tag]\n callback(self, data_element) # self = this Dataset\n # 'tag in self' below needed in case data_element was deleted in callback\n if tag in self and data_element.VR == \"SQ\":\n sequence = data_element.value\n for dataset in sequence:\n dataset.walk(callback)", "def to_tags(seq_len: int, spans: List[Span]) -> list:\n tags = [\"O\"] * seq_len\n for span in spans:\n pos = span.start\n if pos < seq_len:\n tags[pos] = \"B-{0}\".format(span.label)\n pos += 1\n while pos < min(span.end + 1, seq_len):\n tags[pos] = \"I-{0}\".format(span.label)\n pos += 1\n\n return tags", "def tags(self) -> List:", "def mapcat(func, seqs): # real signature unknown; restored from __doc__\n pass", "def ModifyXMLElements(text, symbol, start_tag_regexp, end_tag_func, callback):\n before_tag = start_tag = end_tag_regexp = end_tag = None\n result = ''\n\n logging.debug('modify xml for symbol: %s, regex: %s, text: [%s]', symbol, start_tag_regexp, text)\n\n m = re.search(start_tag_regexp, text, flags=re.S)\n while m:\n before_tag = text[:m.start()] # Prematch for last successful match string\n start_tag = m.group(0) # Last successful match\n text = text[m.end():] # Postmatch for last successful match string\n # get the matching end-tag for current tag\n end_tag_regexp = end_tag_func(start_tag)\n\n logging.debug('symbol: %s matched start: %s, end_tag: %s, text: [%s]', symbol, start_tag, end_tag_regexp, text)\n\n logging.debug('converting before tag: [%s]', before_tag)\n result += callback(before_tag, symbol, '')\n result += start_tag\n\n m2 = re.search(end_tag_regexp, text, flags=re.S)\n if m2:\n before_tag = text[:m2.start()]\n end_tag = m2.group(0)\n text = text[m2.end():]\n\n logging.debug('symbol: %s matched end %s: text: [%s]', symbol, end_tag, text)\n\n result += callback(before_tag, symbol, start_tag)\n result += end_tag\n else:\n common.LogWarning(GetSymbolSourceFile(symbol), GetSymbolSourceLine(symbol),\n \"Can't find tag end: %s in docs for: %s.\" % (end_tag_regexp, symbol))\n # Just assume it is all inside the tag.\n result += callback(text, symbol, start_tag)\n text = ''\n m = re.search(start_tag_regexp, text, flags=re.S)\n\n # Handle any remaining text outside the tags.\n logging.debug('converting after tag: [%s]', text)\n result += callback(text, symbol, '')\n logging.debug('results for symbol: %s, text: [%s]', symbol, result)\n\n return result", "def tag_all(text):\n sentences = TPU.sentences(text)\n return [TPU.tag_one(sentence) for sentence in sentences]", "def tag_table_data(tags: dict) -> Generator[tuple, None, None]:\n for tag in tags.values():\n if tag.needs_context:\n continue\n yield [tag.name, tag.desc]\n for subtag_name in tag.subtags:\n subtag = tags.get(subtag_name).in_context(tag.name)\n yield [f\"\\u2514 {subtag.name}\", subtag.desc]", "def 
bio_to_spans(text: List[str], tags: List[str]) -> List[Tuple[int, int, str]]:\n pointer = 0\n starts = []\n for (\n i,\n t,\n ) in enumerate(tags):\n if t.startswith(\"B-\"):\n starts.append((i, pointer))\n pointer += len(text[i]) + 1\n\n spans = []\n for s_i, s_char in starts:\n label_str = tags[s_i][2:]\n e = 0\n e_char = len(text[s_i + e])\n while len(tags) > s_i + e + 1 and tags[s_i + e + 1].startswith(\"I-\"):\n e += 1\n e_char += 1 + len(text[s_i + e])\n spans.append((s_char, s_char + e_char, label_str))\n return spans", "def iterate_seqs(input_alignment, output_file):\n ref_seq, filter_seq = get_ref_and_filter(input_alignment)\n #Iterate through the sequences, updating the filter.\n for seq_record in SeqIO.parse(input_alignment, \"fasta\"):\n filter_seq = compare_seqs(ref_seq, seq_record.seq, filter_seq)\n #Setting all the '2' elements to 0.\n #filter_seq = [0 if elem == 2 else elem for elem in filter_seq]\n #Use the filter to generate a new file.\n for seq_record in SeqIO.parse(input_alignment, \"fasta\"):\n filtered_seq = \"\".join(filter_query(seq_record.seq, filter_seq))\n with open(output_file, \"a\") as f:\n f.write(\">\" + seq_record.description + \"\\n\" + filtered_seq + \"\\n\")\n #Get list of SNP positions.\n pos_counter = 0\n pos_list = []\n for pos in filter_seq:\n if pos:\n pos_list.append(pos_counter)\n pos_counter += 1\n with open(output_file + \".poslist\", \"a\") as f:\n for pos in pos_list:\n f.write((str(pos) + \"\\n\"))", "def convert(func, seq):\n return [func(eachNum) for eachNum in seq]", "def pos_tag_io(self):\n UTF8Reader = codecs.getreader('utf8')\n input_stream = UTF8Reader(sys.stdin)\n UTF8Writer = codecs.getwriter('utf8')\n output_stream = UTF8Writer(sys.stdout)\n\n for line in input_stream:\n for w in self.tagger.tag(word_tokenize(line.strip())):\n output_stream.write(w[0])\n output_stream.write(\"\\t\")\n output_stream.write(w[1])\n output_stream.write(\"\\n\")\n output_stream.write(\"\\n\")", "def colorized_text_generator(fragments):\n for part in fragments:\n if part and part[0] not in config[\"tagsymbols\"]:\n yield colorize(part, color, bold=is_title), part\n elif part:\n yield colorize(part, config[\"colors\"][\"tags\"], bold=True), part" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A Simple PATCH request to the API Server
def sample_patch_request(host, username, password, resource, data): # build the URL url = urlunparse(('https', host, resource, None, None, None)) print "PATCH: %s" % url return requests.patch(url, json=data, auth=HTTPBasicAuth(username, password), verify=False)
[ "def method_patch(self, uri, **kwargs):\r\n return self._api_request(uri, \"PATCH\", **kwargs)", "def update_request():", "def patch(self):\n req_op = self.get_argument('op')\n req_path = self.get_argument('path')\n req_value = self.get_argument('value', None)\n req_from = self.get_argument('from', None)\n\n response = ontology_patch_handler(req_op, req_path, req_value,\n req_from)\n self.write(response)", "def patch(self, *args, **kwargs):\n self.method_not_allowed_error('PATCH')", "def test_patch(self):\n url = reverse(\"users-api:userconfig-list\")\n\n data = {\n \"a\": {\n \"a1\": \"X\",\n \"a2\": \"Y\",\n },\n \"b\": {\n \"b1\": \"Z\",\n },\n }\n response = self.client.patch(url, data=data, format=\"json\", **self.header)\n self.assertDictEqual(response.data, data)\n self.user.refresh_from_db()\n self.assertDictEqual(self.user.config_data, data)\n\n update_data = {\"c\": 123}\n response = self.client.patch(url, data=update_data, format=\"json\", **self.header)\n new_data = deepmerge(data, update_data)\n self.assertDictEqual(response.data, new_data)\n self.user.refresh_from_db()\n self.assertDictEqual(self.user.config_data, new_data)", "def test_disallow_patch_many(self):\r\n response = self.app.patch('/api/person', data=dumps(dict(name='foo')))\r\n assert response.status_code == 405", "def patch(self, endpoint=None, data=None, json=None, callback=None, callback_kwargs=None):\n return self._call(\"PATCH\",\n endpoint=endpoint,\n data=data,\n json=json,\n callback=callback,\n callback_kwargs=callback_kwargs)", "def test_api_can_update_food(self):\n food = Food.objects.first()\n change = {'food': {'name': 'Something new', 'calories': '100'}}\n response = self.client.patch(f'/api/v1/foods/{food.id}', change, format='json')\n js = response.json()\n self.assertEqual(js[\"name\"], \"Something new\")\n self.assertEqual(js[\"calories\"], 100)", "def patch(self, _id: str) -> tuple:\n json_data = json.loads(request.data, encoding='utf-8')\n result = QuestionDao.update(_id, json_data), 200\n if result:\n return result\n api.abort(400)", "def test_request_can_updated_successfully(self):\r\n request_model.requests.clear()\r\n res = self.client().post('/api/v1/request', data=json.dumps(self.request),\r\n headers={\"content-type\": \"application/json\",\r\n \"access-token\": self.token})\r\n res2 = self.client().put('/api/v1/request/1', data=json.dumps(self.update_request),\r\n headers={\"content-type\": \"application/json\",\r\n \"access-token\": self.token})\r\n self.assertEqual(res2.status_code, 202)\r\n self.assertIn(\"request updated!\",str(res2.data))", "def patch(self, request, project_id):\n data = json.loads(request.body)\n action = data.get('action')\n try:\n operation = getattr(self, action.lower())\n except AttributeError:\n LOGGER.error(\n 'Invalid action.',\n extra=request.POST.dict(),\n exc_info=True\n )\n raise ApiException(\n 'Invalid action.',\n 403,\n request.POST.dict()\n )\n return operation(request, project_id, **data)", "def test_custom_client_patch_methods():\n client = BlogTestClient()\n responses.add(responses.PATCH, 'http://dev/api/blogs/1',\n body='''\n {\"id\": 1, \"title\": \"blog title\",\n \"slug\": \"blog-title\",\n \"content\": \"This is some content\"}''',\n status=200,\n content_type='application/json')\n data = {\n \"title\": \"blog title\",\n }\n result = client.patch_blog(uid=1, data=data)\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == 'http://dev/api/blogs/1'\n assert responses.calls[0].request.body == json.dumps(data)\n assert 
responses.calls[0].request.method == 'PATCH'\n assert isinstance(result, list)\n assert isinstance(result[0], BlogResource)\n resource = result[0]\n assert resource.title == 'blog title'", "def patch(self, uri, auth=None, **kwargs):\n if (auth is None):\n auth = (self.user, self.password)\n return requests.patch(uri, auth=auth, **kwargs)", "def test_update_recipe_invalid_field(client):\n resp = client.patch('/recipe/1', json={'random': 'xxxx'})\n assert resp.status_code == server.HTTP_BAD_REQUEST", "def test_company_updating(self):\n company = Company.objects.create(name='Olidata')\n url = reverse('company-detail', args=[company.id])\n\n response = self.client.patch(url, {}, format='json')\n response_data = response.json()\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_update_comment_of_specific_redflag(self):\n self.app.post(\"/api/v1/red-flags/1/comment\", headers={'Content-Type': 'application/json'},\n data = json.dumps(self.redflag))\n response = self.app.patch(\"/api/v1/red-flags/1/comment\", headers={'Content-Type': 'application/json'},\n data = json.dumps({\"comment\" : \"police wanted money to pass the offense\"}))\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200) \n self.assertIn(\"Successfully updated redflag comment\",\n str(result))", "def _patch_update():\n def patched_update(self, *args, **kwargs):\n \"\"\"\n Patched version of Resource.update which send update requests\n containing only the properties specified as arguments to the\n method. If no properties are specified all of them are sent in the\n request.\n \"\"\"\n # pylint: disable=protected-access\n orig_props = self._properties\n\n # user specified which properties to update: set properties dict\n # to contain only them so that the update request do not update\n # unwanted fields\n if args or kwargs:\n self._properties = dict()\n if '$uri' in orig_props:\n self._properties['$uri'] = orig_props['$uri']\n\n # perform the request\n self._properties.update(*args, **kwargs)\n self.save()\n\n # restore all properties\n if args or kwargs:\n orig_props.update(self._properties)\n self._properties = orig_props\n # patched_update()\n potion_resource.Resource.update = patched_update", "def test_update_recipe_id(client):\n resp = client.patch('/recipe/1', json={'id': '2'})\n assert resp.status_code == server.HTTP_BAD_REQUEST", "def json_patch():\n\n try:\n data = request.json\n\n if isinstance(data, list) and len(data) == 2:\n\n doc = data[0]\n patch = data[1]\n\n result = jsonpatch.apply_patch(doc, patch)\n\n response = jsonify({'data': result})\n return response\n\n else:\n msg = {'errors': [{'title': 'Invalid request',\n 'details': 'Body was not a array of two JSON elements, ex: [{},{}]. '\n 'First element is the JSON doc to patch, second element is the JSON patch'}]}\n return jsonify(msg), 400\n\n except Exception as e:\n msg = {'errors': [{'title': 'Invalid request', 'details': str(e)}]}\n return jsonify(msg), 400" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test when the certificate has expired.
def test_https_expired(self): domain = inspect("expired.badssl.com") basic_check(domain.https) self.assertTrue(domain.https.https_expired_cert)
[ "def test_check_cert(certfile):\n cert = load_cert(certfile)\n\n now = datetime.datetime.utcnow()\n if now > cert.not_valid_after:\n raise Exception(\"Certificate has expired!\")\n\n elif now + datetime.timedelta(hours=20) > cert.not_valid_after:\n print('> Certificate expiring soon: %s' % cert.not_valid_after)\n\n elif now < cert.not_valid_before:\n raise Exception('Certificate is not yet valid!')", "def fail_if_cert_expires_in_timedelta(cert_path, expire_in_threshold_timedelta):\n # type: (str, datetime.timedelta) -> None\n with open(cert_path, \"rb\") as fp:\n content = fp.read()\n\n cert = x509.load_pem_x509_certificate(content, default_backend())\n\n now_dt = datetime.datetime.utcnow()\n expire_in_days = (cert.not_valid_after - now_dt).days\n\n if now_dt + expire_in_threshold_timedelta >= cert.not_valid_after:\n raise Exception(\n (\n \"Certificate %s will expire in %s days (%s), please update!\"\n % (cert_path, expire_in_days, cert.not_valid_after)\n )\n )\n else:\n print(\n \"OK - certificate %s will expire in %s days (%s)\"\n % (cert_path, expire_in_days, cert.not_valid_after)\n )", "def is_expired(self):\n return self.ttl <= 0", "def test_get_agent_certificate_expiration_date(self):\n pass", "def test_vstart_expired_server_cert(request, instance):\n # replace certs\n crts = instance.certsobj\n try:\n # overwrite certificates with quick expiry certs\n (root_ca, server_cert_name, admin_cert_name) = \\\n Certs.get_admin_cert_names(instance.instance_name)\n\n crts.create_signed_cert_files(server_cert_name, cert_type='server',\n fqdn=fqdn, valid_days=0.0001)\n gevent.sleep(9)\n try:\n instance.startup_platform(vip_address=get_rand_vip(), timeout=10)\n except Exception as e:\n assert str(e).startswith(\"Platform startup failed. Please check volttron.log\")\n assert not (instance.is_running())\n # Rabbitmq log would show\n # \"TLS server: In state certify received CLIENT ALERT: Fatal -\n # Certificate Expired\"\n except Exception as e:\n pytest.fail(\"Test failed with exception: {}\".format(e))", "def validate_certificate(self, certificate):\r\n\t\t\r\n\t\tdates = (certificate.not_valid_before.timestamp(),certificate.not_valid_after.timestamp())\r\n\t\tdate_now=datetime.now().timestamp()\r\n\t\treturn dates[0]< date_now < dates[1]", "def expired(self):\n return self.timer >= self.period", "def is_expired(self):\n\n return self.expiration_date() < date.today()", "def is_expired(self, key=None):\n self.logger.info(\"datablock is checking for expired dataproducts\")", "def isExpired(self):\n\t\treturn self.expired", "def test_expiration(incoming, expected):\n assert BackingInstance(\n access_token_expiration=incoming).is_expired == expected", "def check_expiry(self) -> bool:\n time_left = self.expiry - datetime.now(timezone.utc)\n \n if time_left.total_seconds() < 32400:\n return True\n else:\n return False", "def test_is_expired(self):\n refresh_token = self.refresh_token_instance\n refresh_token.created_at = timezone.now()\n refresh_token.save()\n\n self.assertTrue(refresh_token.is_expired)\n self.assertFalse(refresh_token.is_active)", "def has_expired(self):\n return self.time_remaining <= 0", "def check_certificate():\n server = get_odoo_server_url()\n if server:\n path = Path('/etc/ssl/certs/nginx-cert.crt')\n if path.exists():\n with path.open('r') as f:\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())\n cert_end_date = datetime.datetime.strptime(cert.get_notAfter().decode('utf-8'), \"%Y%m%d%H%M%SZ\") - datetime.timedelta(days=10)\n for key in 
cert.get_subject().get_components():\n if key[0] == b'CN':\n cn = key[1].decode('utf-8')\n if cn == 'OdooTempIoTBoxCertificate' or datetime.datetime.now() > cert_end_date:\n _logger.info(_('Your certificate %s must be updated') % (cn))\n load_certificate()\n else:\n _logger.info(_('Your certificate %s is valid until %s') % (cn, cert_end_date))\n else:\n load_certificate()", "def testExpire(self):\n self._RunAsync(self._short_url.Expire, self._client)\n response = self._RunAsync(self.http_client.fetch, self._url, method='GET')\n self.assertEqual(response.code, 403)", "def has_session_expired(self, expiration_time):", "def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def is_expired(self):\n session.logger.info(f\"Try to check expiration of {self.username} -- {self.date_of_adding}\")\n session.logger.info(f\"time for last connectiong is {datetime.now() - self.date_of_adding}\")\n return datetime.now() - self.date_of_adding > \\\n EXPIRATION_DATE_OF_FAMOUS_ACCOUNT" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test when the certificate has a bad hostname.
def test_https_bad_hostname(self): domain = inspect("wrong.host.badssl.com") basic_check(domain.https) self.assertTrue(domain.https.https_bad_hostname)
[ "def test_invalidHostname(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"wrong-host.example.com\",\n u\"correct-host.example.com\",\n )\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, VerificationError)\n self.assertIsInstance(sErr, ConnectionClosed)", "def test_validHostnameInvalidCertificate(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"valid.example.com\",\n u\"valid.example.com\",\n validCertificate=False,\n )\n\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, SSL.Error)\n self.assertIsInstance(sErr, SSL.Error)", "def test_bad_hostname():\n pytest.xfail(\"Bad hostname.\")\n connect_to_dremio_flight_server_endpoint(\"badHostNamE\",\n \"32010\", \"dremio\", \"dremio123\", False, False, False)", "def test_IPv4AddressHostname(self):\n options = sslverify.optionsForClientTLS(u'127.0.0.1')\n self.assertFalse(options._hostnameIsDnsName)", "def test_dNSNameHostname(self):\n options = sslverify.optionsForClientTLS(u'example.com')\n self.assertTrue(options._hostnameIsDnsName)", "def test_fallback(self):\n name = 'something.example.com'\n class Connection(object):\n def get_peer_certificate(self):\n \"\"\"\n Fake of L{OpenSSL.SSL.Connection.get_peer_certificate}.\n\n @return: A certificate with a known common name.\n @rtype: L{OpenSSL.crypto.X509}\n \"\"\"\n cert = X509()\n cert.get_subject().commonName = name\n return cert\n conn = Connection()\n self.assertIs(\n sslverify.simpleVerifyHostname(conn, u'something.example.com'),\n None\n )\n self.assertRaises(\n sslverify.SimpleVerificationError,\n sslverify.simpleVerifyHostname, conn, u'nonsense'\n )", "def test_hostFromNonSSLTransport(self):\n x = self.assertRaises(CertificateError,\n sslverify.Certificate.hostFromTransport,\n _NotSSLTransport())\n self.assertTrue(str(x).startswith(\"non-TLS\"))", "def test_IPv6AddressHostname(self):\n options = sslverify.optionsForClientTLS(u'::1')\n self.assertFalse(options._hostnameIsDnsName)", "def test_hostFromBlankSSLTransport(self):\n x = self.assertRaises(CertificateError,\n sslverify.Certificate.hostFromTransport,\n _MaybeSSLTransport())\n self.assertTrue(str(x).startswith(\"TLS\"))", "def test_create_host_cert_invalid_alt_name(self):\n cn = 'test.foo-valid.com'\n alt_name = 'test.foo invalid.com'\n cert = SpokeHostCert(cn, self.ca_name)\n self.assertRaises(error.InputError, cert.create, alt_name)", "def test_validate_host_cert(self):\n cn = 'test.valid-cert.com'\n cert = SpokeHostCert(cn, self.ca_name)\n cert.create()\n self.assertTrue(cert._verify())\n cert.delete()", "def test_get_missing_host_cert(self):\n cn = 'test.get-missing-foo.com'\n cert = SpokeHostCert(cn, self.ca_name)\n expected_result = {'count': 0, 'type': 'Certificate', 'data': [], 'exit_code': 3,\n 'msg': 'No Certificate(s) found'}\n result = cert.get()\n self.assertEqual(result, expected_result)", "def test_isc_fqdn_name_failing(self):\n test_data = [\"www.weird-hostname#a.com\"]\n result = fqdn_name.runTests(test_data, failureTests=True)\n self.assertTrue(result[0])", "def _verify_hostname(self, hostname, cert):\r\n common_name = self._get_common_name(cert)\r\n alt_names = self._get_subject_alt_names(cert)\r\n\r\n # replace * with alphanumeric and dash\r\n # replace . 
with literal .\r\n # http://www.dns.net/dnsrd/trick.html#legal-hostnames\r\n valid_patterns = [\r\n re.compile('^' + pattern.replace(r\".\", r\"\\.\")\r\n .replace(r\"*\", r\"[0-9A-Za-z\\-]+\") + '$')\r\n for pattern in (set(common_name) | set(alt_names))]\r\n\r\n return any(\r\n pattern.search(hostname)\r\n for pattern in valid_patterns\r\n )", "def is_valid_host_name(hostname):\n if len(hostname) > 255:\n return False\n if hostname[0].isdigit(): return False\n if hostname[-1:] == \".\":\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n allowed = re.compile(\"(?!-)[A-Z\\d-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(x) for x in hostname.split(\".\"))", "def _validate_hostname(cls, hostname: str) -> bool:\n hostname_regex = re.compile(\n r\"(?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\.?\")\n return bool(hostname_regex.fullmatch(hostname))", "def test_nonexistent_domain(self):\n\t\ttry:\n\t\t\tresult = acme_simple.main([\n\t\t\t\t\"--account-key\", self.keys['account_key'].name,\n\t\t\t\t\"--csr\", self.keys['nonexistent_csr'].name,\n\t\t\t\t\"--acme-dir\", self.tempdir,\n\t\t\t\t\"--ca\", self.CA,\n\t\t\t])\n\t\texcept Exception as e:\n\t\t\tresult = e\n\t\tself.assertIsInstance(result, ValueError)\n\t\tself.assertIn(\"but couldn't download\", result.args[0])", "def test_create_host_alt_name(self):\n cn = 'test.foo-alt.com'\n alt_name = 'test.foo-other.com'\n cert = SpokeHostCert(cn, self.ca_name)\n expected_result = {'count': 1, 'type': 'Certificate', 'exit_code': 0, \n 'msg': 'Found Certificate:'}\n expected_result['data'] = [{'cert_cn': cn,\n 'verify': 'success',\n 'cert_as_pem': ''}]\n result = cert.create(alt_name)\n result['data'][0]['cert_as_pem'] = ''\n self.assertEqual(result, expected_result)\n cert.delete()", "def test_https_self_signed_cert(self):\n domain = inspect(\"self-signed.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_self_signed_cert)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test when there is a bad chain of trust for a certificate.
def test_https_bad_chain(self): domain = inspect("untrusted-root.badssl.com") basic_check(domain.https) self.assertTrue(domain.https.https_bad_chain)
[ "def test_trustRootPlatformRejectsUntrustedCA(self):\n caSelfCert, serverCert = certificatesForAuthorityAndServer()\n chainedCert = pathContainingDumpOf(self, serverCert, caSelfCert)\n privateKey = pathContainingDumpOf(self, serverCert.privateKey)\n\n sProto, cProto, sWrapped, cWrapped, pump = loopbackTLSConnection(\n trustRoot=platformTrust(),\n privateKeyFile=privateKey,\n chainedCertFile=chainedCert,\n )\n # No data was received.\n self.assertEqual(cWrapped.data, b'')\n\n # It was an L{SSL.Error}.\n self.assertEqual(cWrapped.lostReason.type, SSL.Error)\n\n # Some combination of OpenSSL and PyOpenSSL is bad at reporting errors.\n err = cWrapped.lostReason.value\n self.assertEqual(err.args[0][0][2], 'tlsv1 alert unknown ca')", "def test_trustRootFromCertificatesUntrusted(self):\n key, cert = makeCertificate(O=b\"Server Test Certificate\", CN=b\"server\")\n serverCert = sslverify.PrivateCertificate.fromCertificateAndKeyPair(\n sslverify.Certificate(cert),\n sslverify.KeyPair(key),\n )\n untrustedCert = sslverify.Certificate(\n makeCertificate(O=b\"CA Test Certificate\", CN=b\"unknown CA\")[1]\n )\n\n trust = sslverify.trustRootFromCertificates([untrustedCert])\n\n # Since we only trust 'untrustedCert' which has not signed our\n # server's cert, we should reject this connection\n sProto, cProto, sWrap, cWrap, pump = loopbackTLSConnectionInMemory(\n trustRoot=trust,\n privateKey=serverCert.privateKey.original,\n serverCertificate=serverCert.original,\n )\n\n # This connection should fail, so no data was received.\n self.assertEqual(cWrap.data, b'')\n\n # It was an L{SSL.Error}.\n self.assertEqual(cWrap.lostReason.type, SSL.Error)\n\n # Some combination of OpenSSL and PyOpenSSL is bad at reporting errors.\n err = cWrap.lostReason.value\n self.assertEqual(err.args[0][0][2], 'tlsv1 alert unknown ca')", "def test_validHostnameInvalidCertificate(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"valid.example.com\",\n u\"valid.example.com\",\n validCertificate=False,\n )\n\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, SSL.Error)\n self.assertIsInstance(sErr, SSL.Error)", "def validate_cert_chain(self):\r\n\r\n\t\tchain = self.trusting_chain\r\n\t\tif len(self.trusting_chain) <= 1:\r\n\t\t\treturn False \r\n\t\tfor i in range(0, len(chain) - 1):\r\n\r\n\t\t\tif not self.validate_certificate(chain[i]):\r\n\t\t\t\treturn False\r\n\r\n\t\t\t#verifies if the signatures are valid \r\n\t\t\tif not self.validate_signature(chain[i+1], chain[i]):\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\t\t# verifies if the certificate is not on a CRL \r\n\t\t\tif not self.crl_validation(chain[i]):\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\treturn True", "def test_realCAsBetterNotSignOurBogusTestCerts(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"valid.example.com\",\n u\"valid.example.com\",\n validCertificate=False,\n useDefaultTrust=True,\n )\n\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, SSL.Error)\n self.assertIsInstance(sErr, SSL.Error)", "def test_failedCertificateVerification(self):\n onServerLost = defer.Deferred()\n onClientLost = defer.Deferred()\n self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,\n certificate=self.sCert, verify=False,\n 
requireCertificate=False),\n sslverify.OpenSSLCertificateOptions(verify=True,\n requireCertificate=False, caCerts=[self.cCert]),\n onServerLost=onServerLost,\n onClientLost=onClientLost)\n\n d = defer.DeferredList([onClientLost, onServerLost],\n consumeErrors=True)\n def afterLost(result):\n ((cSuccess, cResult), (sSuccess, sResult)) = result\n self.assertFalse(cSuccess)\n self.assertFalse(sSuccess)\n\n return d.addCallback(afterLost)", "def test_wrong_cert(self):\n certfile = os.path.join(os.path.dirname(__file__) or os.curdir,\n \"keycert.pem\")\n server = ThreadedEchoServer(SIGNED_CERTFILE,\n certreqs=ssl.CERT_REQUIRED,\n cacerts=SIGNING_CA, chatty=False,\n connectionchatty=False)\n with server, \\\n socket.socket() as sock, \\\n test_wrap_socket(sock,\n certfile=certfile,\n ssl_version=ssl.PROTOCOL_TLSv1) as s:\n try:\n # Expect either an SSL error about the server rejecting\n # the connection, or a low-level connection reset (which\n # sometimes happens on Windows)\n s.connect((HOST, server.port))\n except ssl.SSLError as e:\n if support.verbose:\n sys.stdout.write(\"\\nSSLError is %r\\n\" % e)\n except OSError as e:\n if e.errno != errno.ECONNRESET:\n raise\n if support.verbose:\n sys.stdout.write(\"\\nsocket.error is %r\\n\" % e)\n else:\n self.fail(\"Use of invalid cert should have failed!\")", "def test_https_self_signed_cert(self):\n domain = inspect(\"self-signed.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_self_signed_cert)", "def test_check_cert(certfile):\n cert = load_cert(certfile)\n\n now = datetime.datetime.utcnow()\n if now > cert.not_valid_after:\n raise Exception(\"Certificate has expired!\")\n\n elif now + datetime.timedelta(hours=20) > cert.not_valid_after:\n print('> Certificate expiring soon: %s' % cert.not_valid_after)\n\n elif now < cert.not_valid_before:\n raise Exception('Certificate is not yet valid!')", "def tests_validate_self_signed_root_ca(self):\n cert = SpokeCACert(self.ca_cn, self.ca_name)\n self.assertTrue(cert._verify())", "def test_extraChainDoesNotBreakPyOpenSSL(self):\n opts = sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n extraCertChain=self.extraCertChain,\n )\n ctx = opts.getContext()\n self.assertIsInstance(ctx, SSL.Context)", "def test_trustRootFromCertificatesOpenSSLObjects(self):\n private = sslverify.PrivateCertificate.loadPEM(A_KEYPAIR)\n certX509 = private.original\n\n exception = self.assertRaises(\n TypeError,\n sslverify.trustRootFromCertificates, [certX509],\n )\n self.assertEqual(\n \"certificates items must be twisted.internet.ssl.CertBase \"\n \"instances\",\n exception.args[0],\n )", "def test_trustRootSpecificCertificate(self):\n caCert, serverCert = certificatesForAuthorityAndServer()\n otherCa, otherServer = certificatesForAuthorityAndServer()\n sProto, cProto, sWrapped, cWrapped, pump = loopbackTLSConnection(\n trustRoot=caCert,\n privateKeyFile=pathContainingDumpOf(self, serverCert.privateKey),\n chainedCertFile=pathContainingDumpOf(self, serverCert),\n )\n pump.flush()\n self.assertIsNone(cWrapped.lostReason)\n self.assertEqual(cWrapped.data,\n sWrapped.greeting)", "def test_empty_cert(self):\n self.bad_cert_test(\"nullcert.pem\")", "def is_trusted(self, host, trust):\n raise NotImplementedError()", "def test_signed_xip_multiple_certificates_invalid_input(data_dir):\n # indexed certificate is not specified\n der_file_names = ['selfsign_4096_v3.der.crt', 'selfsign_3072_v3.der.crt', 'selfsign_2048_v3.der.crt']\n with pytest.raises(IndexError):\n 
certificate_block(data_dir, der_file_names, 3)\n\n # indexed certificate is not specified\n der_file_names = ['selfsign_4096_v3.der.crt', None, 'selfsign_3072_v3.der.crt', 'selfsign_2048_v3.der.crt']\n with pytest.raises(ValueError):\n certificate_block(data_dir, der_file_names, 1)\n\n # public key in certificate and private key does not match\n der_file_names = ['selfsign_4096_v3.der.crt']\n cert_block = certificate_block(data_dir, der_file_names, 0)\n priv_key_pem_data = _load_private_key(data_dir, 'selfsign_privatekey_rsa2048.pem')\n with pytest.raises(ValueError):\n MasterBootImage(app=bytes(range(128)), load_addr=0, image_type=MasterBootImageType.SIGNED_XIP_IMAGE,\n trust_zone=TrustZone.disabled(),\n cert_block=cert_block, priv_key_pem_data=priv_key_pem_data).export()\n\n # chain of certificates does not match\n der_file_names = ['selfsign_4096_v3.der.crt']\n chain_certificates = ['ch3_crt2_v3.der.crt']\n with pytest.raises(ValueError):\n certificate_block(data_dir, der_file_names, 0, chain_certificates)", "def test_constructorDoesNotAllowLegacyWithTrustRoot(self):\n self.assertRaises(\n TypeError,\n sslverify.OpenSSLCertificateOptions,\n privateKey=self.sKey, certificate=self.sCert,\n verify=True, trustRoot=None, caCerts=self.caCerts,\n )\n self.assertRaises(\n TypeError,\n sslverify.OpenSSLCertificateOptions,\n privateKey=self.sKey, certificate=self.sCert,\n trustRoot=None, requireCertificate=True,\n )", "def test_trust_init_fail(self):\r\n\r\n self._stubs_v3(method='trust', trust_scoped=False)\r\n cfg.CONF.set_override('deferred_auth_method', 'trusts')\r\n self.m.ReplayAll()\r\n\r\n ctx = utils.dummy_context()\r\n ctx.username = None\r\n ctx.password = None\r\n ctx.auth_token = None\r\n ctx.trust_id = 'atrust123'\r\n ctx.trustor_user_id = 'trustor_user_id'\r\n self.assertRaises(exception.AuthorizationFailure,\r\n heat_keystoneclient.KeystoneClient, ctx)", "def test_trust_init_fail(self):\n\n self._stubs_v3(method='trust', trust_scoped=False)\n cfg.CONF.set_override('deferred_auth_method', 'trusts')\n self.m.ReplayAll()\n\n ctx = utils.dummy_context()\n ctx.username = None\n ctx.password = None\n ctx.auth_token = None\n ctx.trust_id = 'atrust123'\n ctx.trustor_user_id = 'trustor_user_id'\n self.assertRaises(exception.AuthorizationFailure,\n heat_keystoneclient.KeystoneClient, ctx)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test when a certificate is selfsigned.
def test_https_self_signed_cert(self): domain = inspect("self-signed.badssl.com") basic_check(domain.https) self.assertTrue(domain.https.https_self_signed_cert)
[ "def allow_self_signed_certificate():\n\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n ssl._create_default_https_context = _create_unverified_https_context\n except AttributeError:\n # legacy Python that doesn't verify HTTPS certificates by default\n\n pass", "def tests_validate_self_signed_root_ca(self):\n cert = SpokeCACert(self.ca_cn, self.ca_name)\n self.assertTrue(cert._verify())", "def test_replace_certificate_signing_request(self):\n pass", "def test_read_certificate_signing_request_status(self):\n pass", "def test_read_certificate_signing_request(self):\n pass", "def create_self_signed_certificate(*props): # pylint: disable=unused-argument\n pass", "def test_replace_certificate_signing_request_status(self):\n pass", "def test_patch_certificate_signing_request_status(self):\n pass", "def test_patch_certificate_signing_request(self):\n pass", "def test_create_certificate_signing_request(self):\n pass", "def test_trustRootSelfSignedServerCertificate(self):\n key, cert = makeCertificate(O=b\"Server Test Certificate\", CN=b\"server\")\n selfSigned = sslverify.PrivateCertificate.fromCertificateAndKeyPair(\n sslverify.Certificate(cert),\n sslverify.KeyPair(key),\n )\n\n trust = sslverify.trustRootFromCertificates([selfSigned])\n\n # Since we trust this exact certificate, connections to this server\n # should succeed.\n sProto, cProto, sWrap, cWrap, pump = loopbackTLSConnectionInMemory(\n trustRoot=trust,\n privateKey=selfSigned.privateKey.original,\n serverCertificate=selfSigned.original,\n )\n self.assertEqual(cWrap.data, b'greetings!')\n self.assertIsNone(cWrap.lostReason)", "def test_empty_cert(self):\n self.bad_cert_test(\"nullcert.pem\")", "def test_create_agent_certificate_signing_request(self):\n pass", "def test_cert_verification(self, session):\n adapter = DummyAdapter()\n session.mount(\"https://\", adapter)\n client = corbeau.Client(self.dsn)\n client.captureMessage(\"oh noes!\")\n request = adapter.request\n kwargs = adapter.kwargs\n self.assertTrue(kwargs[\"verify\"])\n self.assertEqual(kwargs[\"timeout\"], 1)\n self.assertTrue(\"X-Sentry-Auth\" in request.headers)\n self.assertTrue(request.body)", "def test_read_certificate_signing_request_approval(self):\n pass", "def generate_selfsigned_certificate(self):\n key = self.get_private_key_obj()\n issuer = subject = self.build_x509_name()\n subject_alt_name = self.build_subject_alt_name()\n cert = x509.CertificateBuilder(\n ).subject_name(subject\n ).issuer_name(issuer\n ).public_key(key.public_key()\n ).serial_number(x509.random_serial_number()\n ).not_valid_before(datetime.datetime.utcnow()\n ).not_valid_after(datetime.datetime.utcnow() +\n datetime.timedelta(days=10)\n ).add_extension(subject_alt_name, critical=False,\n ).sign(key, hashes.SHA256())\n self.write_pki_asset(cert.public_bytes(serialization.Encoding.PEM), \n \"cert\")", "def test_replace_certificate_signing_request_approval(self):\n pass", "def test_realCAsBetterNotSignOurBogusTestCerts(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"valid.example.com\",\n u\"valid.example.com\",\n validCertificate=False,\n useDefaultTrust=True,\n )\n\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, SSL.Error)\n self.assertIsInstance(sErr, SSL.Error)", "def _sanityCheckForSSL(self):\n if not self.requiresSsl():\n return 0\n\n if not self.sslCertPath:\n log.error(\"sslCertPath to be 
set - cannot start server\")\n return 1\n try:\n util.mkdirChain(os.path.dirname(self.sslCertPath))\n except OSError, err:\n log.error(\"Could not access sslCert dir %s: %s\" % os.path.dirname(self.sslCertPath), err)\n\n if self.caCertPath:\n log.warning(\"The caCertPath option is deprecated\")\n return self.makeCertificate()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a vtk Unstructured Grid file (.vtk, .vtu) from a welltracks DataFrame
def makeVTKWells(fname_base, welltracks_df, xml=False):
    numpoints = welltracks_df.shape[0]
    wells = welltracks_df['Well'].unique().tolist()
    numwells = len(wells)
    grid = vtkUnstructuredGrid()
    points = vtkPoints()
    for i in range(numpoints):
        points.InsertNextPoint(welltracks_df.loc[i,'X'], welltracks_df.loc[i,'Y'], welltracks_df.loc[i,'Z'])
    cells = vtkCellArray()
    wellname = vtkStringArray()
    wellname.SetName('Well')
    for well in wells:
        print well
        polyline = vtkPolyLine()
        indices = welltracks_df[welltracks_df['Well']==well].index.tolist()
        for i, j in enumerate(indices):
            polyline.GetPointIds().SetNumberOfIds(len(indices))
            polyline.GetPointIds().SetId(i,j)
        cells.InsertNextCell(polyline)
        wellname.InsertNextValue(well)
    grid.SetPoints(points)
    grid.SetCells(VTK_POLY_LINE, cells)
    grid.GetCellData().AddArray(wellname)
    if xml:
        writer = vtkXMLUnstructuredGridWriter()
        writer.SetFileName('{}.vtu'.format(fname_base))
        writer.SetDataModeToAscii()
        writer.SetInputData(grid)
        writer.Write()
    else:
        writer = vtkUnstructuredGridWriter()
        writer.SetFileName('{}.vtk'.format(fname_base))
        writer.SetInputData(grid)
        writer.Write()
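A minimal usage sketch for makeVTKWells, assuming the VTK names it relies on (vtkUnstructuredGrid, vtkPoints, vtkPolyLine, the writers, VTK_POLY_LINE) were imported in the original module, e.g. via a star import from vtk; the well names and coordinates below are made up for illustration:

import pandas as pd

# One row per survey point along a well path; the function expects a
# default 0..n-1 index because it uses .loc[i, 'X'] with i in range(n).
welltracks = pd.DataFrame({
    'Well': ['W-1', 'W-1', 'W-1', 'W-2', 'W-2'],
    'X':    [0.0,    5.0,   10.0,  50.0,  55.0],
    'Y':    [0.0,    0.0,    2.0,  10.0,  12.0],
    'Z':    [0.0, -100.0, -200.0,   0.0, -150.0],
})

makeVTKWells('wells', welltracks)            # writes legacy ASCII wells.vtk
makeVTKWells('wells', welltracks, xml=True)  # writes XML wells.vtu instead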
[ "def byu_to_vtk(byu_filename, vtk_filename):\n V, F = load_surface(byu_filename)\n nv, nf = V.shape[1], F.shape[0]\n with open(vtk_filename, 'w+') as f:\n f.write('# vtk DataFile Version 3.0\\n')\n f.write('Surface Data\\n')\n f.write('ASCII\\n')\n f.write('DATASET POLYDATA\\n\\n')\n f.write('POINTS {} float\\n'.format(nv))\n for i in range(nv):\n f.write('{} {} {}\\n'.format(V[0, i], V[1, i], V[2, i]))\n\n f.write('POLYGONS {} {}\\n'.format(nf, nf*4))\n for i in range(nf):\n f.write('3 {} {} {}\\n'.format(F[i, 0], F[i, 1], F[i, 2]))", "def createVTKFile(args, x, y, z, numNodes, timestep):\n\n from os import path, makedirs\n # quick check to make sure file extension is correct\n fileName = set_filename(args.vtkout)\n # open .vts file for writing)\n if not path.exists(fileName):\n makedirs(fileName)\n vtkout = open(path.join(fileName, fileName+str(timestep)+'.vts'), 'w')\n\n # writing the VTK file outline\n vtkout.write('<VTKFile type=\"StructuredGrid\" version=\"0.1\" byte_order=\"LittleEndian\">\\n')\n numXValues = abs(round((x[2]-x[0])/x[1]))\n numYValues = abs(round((y[2]-y[0])/y[1]))\n numZValues = abs(round((z[2]-z[0])/z[1]))\n\n vtkout.write('\\t<StructuredGrid WholeExtent=\"0 %d 0 %d 0 %d\">\\n' % (numXValues, numYValues, numZValues))\n vtkout.write('\\t\\t<Piece Extent=\"0 %d 0 %d 0 %d\">\\n' % (numXValues, numYValues, numZValues))\n vtkout.write('\\t\\t\\t<PointData Scalars=\"node_id\" Vectors=\"displacement\">\\n')\n # writing node ids\n vtkout.write('\\t\\t\\t\\t<DataArray type=\"Float32\" Name=\"node_id\" format=\"ascii\">\\n')\n for i in range(1, numNodes+1):\n vtkout.write('\\t\\t\\t\\t\\t%.1f\\n' % i)\n vtkout.write('\\t\\t\\t\\t</DataArray>\\n')\n # writing displacement values\n vtkout.write('\\t\\t\\t\\t<DataArray NumberOfComponents=\"3\" type=\"Float32\" Name=\"displacement\" format=\"ascii\">\\n')\n\n with open('disp_temp.txt', 'r') as displace_temp:\n for line in displace_temp:\n vtkout.write('\\t\\t\\t\\t\\t'+line)\n\n vtkout.write('\\t\\t\\t\\t</DataArray>\\n')\n vtkout.write('\\t\\t\\t</PointData>\\n')\n # writing point position values\n vtkout.write('\\t\\t\\t<Points>\\n')\n vtkout.write('\\t\\t\\t\\t<DataArray type=\"Float32\" Name=\"Array\" NumberOfComponents=\"3\" format=\"ascii\">\\n')\n\n with open('pos_temp.txt', 'r') as pos_temp:\n for line in pos_temp:\n vtkout.write('\\t\\t\\t\\t\\t'+line)\n\n vtkout.write('\\t\\t\\t\\t</DataArray>\\n')\n vtkout.write('\\t\\t\\t</Points>\\n')\n vtkout.write('\\t\\t</Piece>\\n')\n vtkout.write('\\t</StructuredGrid>\\n')\n vtkout.write('</VTKFile>')\n\n vtkout.close()\n\n return 0", "def SaveVTK(self, outfile=\"Output\"):\n writer = vtk.vtkXMLUnstructuredGridWriter()\n writer.SetFileName(outfile + \".vtu\")\n writer.SetInput(self.mesh)\n writer.Write()", "def export_vtk(self, filename):\n\n pass", "def writeVTK(self, fileName, models=None):\n import vtk\n from vtk import vtkXMLUnstructuredGridWriter as Writer, VTK_VERSION\n from vtk.util.numpy_support import numpy_to_vtk, numpy_to_vtkIdTypeArray\n\n # Make the data parts for the vtu object\n # Points\n #mesh.number()\n if(type(mesh) is TreeMesh):\n ptshMat = mesh.gridhN\n ptsMat = np.vstack((mesh.gridN, mesh.gridhN))\n else:\n ptsMat = mesh._gridN + mesh.x0\n\n vtkPts = vtk.vtkPoints()\n vtkPts.SetData(numpy_to_vtk(ptsMat, deep=True))\n # Cells\n cellConn = np.array([c.nodes for c in mesh], dtype=np.int64)\n\n cellsMat = np.concatenate((np.ones((cellConn.shape[0], 1), dtype=np.int64)*cellConn.shape[1], cellConn), axis=1).ravel()\n cellsArr = vtk.vtkCellArray()\n 
cellsArr.SetNumberOfCells(cellConn.shape[0])\n cellsArr.SetCells(cellConn.shape[0], numpy_to_vtkIdTypeArray(cellsMat, deep =True))\n\n # Make the object\n vtuObj = vtk.vtkUnstructuredGrid()\n vtuObj.SetPoints(vtkPts)\n vtuObj.SetCells(vtk.VTK_VOXEL, cellsArr)\n # Add the level of refinement as a cell array\n cellSides = np.array([np.array(vtuObj.GetCell(i).GetBounds()).reshape((3, 2)).dot(np.array([-1, 1])) for i in np.arange(vtuObj.GetNumberOfCells())])\n uniqueLevel, indLevel = np.unique(np.prod(cellSides, axis=1), return_inverse=True)\n refineLevelArr = numpy_to_vtk(indLevel.max() - indLevel, deep=1)\n refineLevelArr.SetName('octreeLevel')\n vtuObj.GetCellData().AddArray(refineLevelArr)\n # Assign the model('s) to the object\n if models is not None:\n for item in six.iteritems(models):\n # Convert numpy array\n vtkDoubleArr = numpy_to_vtk(item[1], deep=1)\n vtkDoubleArr.SetName(item[0])\n vtuObj.GetCellData().AddArray(vtkDoubleArr)\n\n # Make the writer\n vtuWriteFilter = Writer()\n if float(VTK_VERSION.split('.')[0]) >= 6:\n vtuWriteFilter.SetInputData(vtuObj)\n else:\n vtuWriteFilter.SetInput(vtuObj)\n vtuWriteFilter.SetFileName(fileName)\n # Write the file\n vtuWriteFilter.Update()", "def write_vtk_series(self, path, name,\n print_status=True):\n from pyevtk.hl import gridToVTK # evtk module\n import xml.etree.cElementTree as ET # xml module\n\n # set the collection filename\n collection_fname = name + \".pvd\"\n\n # set up blank list of the vtk filenames\n data_filenames = []\n\n # set up XML tree for PVD collection file\n root = ET.Element(\"VTKFile\")\n root.set(\"type\", \"Collection\")\n collection = ET.SubElement(root, \"Collection\")\n\n # write the VTK files\n for i, time in enumerate(np.sort(self.times)):\n # get the system time (for elapsed time)\n t_start = pytime.time()\n\n # get the filename containing the data at current time\n fname = self.fdict[time]\n\n # base name of data file\n vtk_name = name + '_' + str(i)\n\n # read the CSV data file\n df_inst = self.get_df_inst(time=time)\n grid_data, grid_dims = self.fielddata_from_df(df_inst)\n\n # unpack the grid data\n X = grid_data['X']\n Y = grid_data['Y']\n Z = grid_data['Z']\n U = grid_data['U']\n V = grid_data['V']\n W = grid_data['W']\n\n # save velocity fields as tuples\n velocity = (U, V, W)\n\n # create dictionary of data\n pointData = {'velocity': velocity}\n\n # check if the file has freestream velocity data\n if 'Ufs' in grid_data and \\\n 'Vfs' in grid_data and \\\n 'Wfs' in grid_data:\n # get the freestream velocity data\n Ufs = grid_data['Ufs']\n Vfs = grid_data['Vfs']\n Wfs = grid_data['Wfs']\n\n # save as tuple\n velocity_fs = (Ufs, Vfs, Wfs)\n\n # append to pointdata dictionary\n pointData['velocity_fs'] = velocity_fs\n\n data_filename = gridToVTK(os.path.abspath(os.path.join(path,\n vtk_name)),\n X, Y, Z,\n pointData=pointData)\n\n # append filename to list\n data_filenames.append(data_filename)\n\n # add elements to XML tree for PVD collection file\n dataset = ET.SubElement(collection, \"DataSet\")\n dataset.set(\"timestep\", str(time))\n dataset.set(\"file\", os.path.basename(data_filename))\n\n # print status message\n elapsed_time = pytime.time() - t_start\n if print_status:\n print 'Converted: ' + fname + ' -->\\n\\t\\t\\t' + data_filename +\\\n ' in %2.2f s\\n' % (elapsed_time)\n\n # write the collection file\n tree = ET.ElementTree(root)\n pvd_filename = os.path.abspath(os.path.join(path, collection_fname))\n tree.write(pvd_filename, xml_declaration=True)\n\n if print_status:\n print 'Wrote 
ParaView collection file: ' + pvd_filename\n\n return data_filenames, pvd_filename", "def writeVTK_UnstructuredGrid(self, arr, fname, scalar_func=None):\n assert arr.shape[1] == 3 or arr.shape[1] == 4, '\\nneed 3 or 4 columns for this'\n if scalar_func == None:\n scalar_func = self.vtk_scalar_func\n if arr.shape[1] == 4:\n HAVE_SCALARS = 1\n else:\n HAVE_SCALARS = 0\n print('No scalar values supplied Z axis values will be used')\n\n n=arr.shape[0]\n print(\"n:\",n)\n # write data to vtk polydata file\n # write header\n out = open(fname+'.vtk', 'w')\n h1 = \"# vtk DataFile Version 2.0\\n\"\n h1 += \"%s\\n\" % fname\n h1 += \"ASCII\\n\"\n h1 += \"DATASET UNSTRUCTURED_GRID\\n\"\n h1 += \"POINTS \" + str(n) + \" double\\n\"\n out.write(h1)\n # write xyz data\n for r in range(n):\n #s = '%15.2f %15.2f %15.2f' % (x[i], y[i], z[i])\n out.write(str(arr[r,0])+\" \"+str(arr[r,1])+\" \"+str(arr[r,2])+'\\n')\n\n # write cell data\n out.write(\"CELLS \"+ str(n)+ \" \"+ str(2*n)+'\\n')\n for r in range(n):\n #s = '1 %d \\n' % (i)\n out.write(\"1 \"+str(r)+\"\\n\")\n\n # write cell types\n out.write(\"CELL_TYPES \" + str(n)+'\\n')\n for r in range(n):\n out.write(\"1 \\n\")\n\n # write z scalar values\n h2 = '\\n' + \"\"\"POINT_DATA \"\"\" + str(n) + \"\\n\"\n h3 = \"SCALARS %s double 1\\n\" % fname\n h3 += \"LOOKUP_TABLE default\\n\"\n out.write(h2 + h3)\n\n for r in range(n):\n if HAVE_SCALARS:\n sc=(scalar_func(arr[r,3]))\n else:\n sc=(scalar_func(arr[r,2]))\n out.write(str(sc)+ \"\\n\")\n\n out.write('\\n')\n out.close()", "def voxel_to_vtk(voxel_file: PathLike, output: PathLike = 'plot.vti'):\n\n # imported vtk only if used as vtk is an option dependency\n import vtk\n\n _min_version = (2, 0)\n\n # Read data from voxel file\n with h5py.File(voxel_file, \"r\") as fh:\n # check version\n version = tuple(fh.attrs[\"version\"])\n if version < _min_version:\n old_version = \".\".join(map(str, version))\n min_version = \".\".join(map(str, _min_version))\n err_msg = (\n f\"This voxel file's version is {old_version}. This function only \"\n f\" supports voxel files with version {min_version} or higher. 
\"\n \"Please generate a new voxel file using a newer version of OpenMC.\"\n )\n raise ValueError(err_msg)\n\n dimension = fh.attrs[\"num_voxels\"]\n width = fh.attrs[\"voxel_width\"]\n lower_left = fh.attrs[\"lower_left\"]\n\n nx, ny, nz = dimension\n\n grid = vtk.vtkImageData()\n grid.SetDimensions(nx + 1, ny + 1, nz + 1)\n grid.SetOrigin(*lower_left)\n grid.SetSpacing(*width)\n\n # transpose data from OpenMC ordering (zyx) to VTK ordering (xyz)\n # and flatten to 1-D array\n h5data = fh[\"data\"][...]\n\n data = vtk.vtkIntArray()\n data.SetName(\"id\")\n # set the array using the h5data array\n data.SetArray(h5data, h5data.size, True)\n # add data to image grid\n grid.GetCellData().AddArray(data)\n\n writer = vtk.vtkXMLImageDataWriter()\n if vtk.vtkVersion.GetVTKMajorVersion() > 5:\n writer.SetInputData(grid)\n else:\n writer.SetInput(grid)\n if not output.endswith(\".vti\"):\n output += \".vti\"\n writer.SetFileName(str(output))\n writer.Write()\n\n return output", "def _as_vtk(self, as_linear=True, include_ids=False):\n nodes = self.nodes.coordinates_field.data\n etypes = self.elements.element_types_field.data\n conn = self.elements.connectivities_field.data\n try:\n from ansys.dpf.core.vtk_helper import dpf_mesh_to_vtk\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n \"To use plotting capabilities, please install pyvista \"\n \"with :\\n pip install pyvista>=0.24.0\"\n )\n\n grid = dpf_mesh_to_vtk(nodes, etypes, conn, as_linear)\n\n # consider adding this when scoping request is faster\n if include_ids:\n grid[\"node_ids\"] = self.nodes.scoping.ids\n grid[\"element_ids\"] = self.elements.scoping.ids\n\n return grid", "def createPVDFile(args, timestep_values):\n from os import path, makedirs\n # quick check to make sure file extension is correct\n fileName = set_filename(args.vtkout)\n # open .pvd file for writing)\n if not path.exists(fileName):\n makedirs(fileName)\n vtkout = open(path.join(fileName, fileName+'.pvd'), 'w')\n vtkout.write('<VTKFile type=\"Collection\" version=\"0.1\">\\n')\n vtkout.write('\\t<Collection>\\n')\n\n timestep = 1\n for i in timestep_values:\n vtkout.write('\\t\\t<DataSet timestep=\"{0}\" file=\"{1}\"/>\\n'.format(i, fileName+str(timestep)+'.vts'))\n timestep += 1\n vtkout.write('\\t</Collection>\\n')\n vtkout.write('</VTKFile>\\n')\n\n return 0", "def load_vtk(file_number,vtk_dir=\"vtk_files\",organs=list_organs):\n file_number = str(file_number).zfill(3)\n reader = vtk.vtkGenericDataObjectReader()\n reader.SetFileName(vtk_dir+\"/500%s_fat_content.vtk\" %(file_number))\n reader.Update()\n \n data_fat = vtk_to_numpy(reader.GetOutput().GetPointData().GetScalars())\n x_range, y_range, z_range = reader.GetOutput().GetDimensions()\n data = np.zeros(6*x_range*y_range*z_range).reshape(6,x_range,y_range,z_range)\n data[0] = data_fat.reshape(x_range,y_range,z_range)\n\n #reader.SetFileName(vtk_dir+\"/500%s_wat_content.vtk\" %(file_number))\n #reader.Update()\n #data_wat = vtk_to_numpy(reader.GetOutput().GetPointData().GetScalars())\n #data[1] = data_wat.reshape(x_range,y_range,z_range)\n \n for i,organ in enumerate(organs):\n reader.SetFileName(vtk_dir+\"/binary_\"+organ+\"500%s.vtk\" %(file_number))\n reader.Update()\n data_organ = vtk_to_numpy(reader.GetOutput().GetPointData().GetScalars())\n data[i+1] = data_organ.reshape(x_range,y_range,z_range)\n \n grid = torch.from_numpy(data[:,54:214,:,54:214])\n return grid.to(dtype=torch.float32)", "def _write_vol_evtk(model, file_name, data_label, nsteps, real_coords=True):\n # Define grid spacing\n loop_X 
= np.linspace(model.bounding_box[0, 0], model.bounding_box[1, 0], nsteps[0])\n loop_Y = np.linspace(model.bounding_box[0, 1], model.bounding_box[1, 1], nsteps[1])\n loop_Z = np.linspace(model.bounding_box[0, 2], model.bounding_box[1, 2], nsteps[2])\n\n # Generate model values in 3d grid\n xx, yy, zz = np.meshgrid(loop_X, loop_Y, loop_Z, indexing='ij')\n # xyz is N x 3 vector array\n xyz = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T\n vals = model.evaluate_model(xyz, scale=False)\n if real_coords:\n model.rescale(xyz)\n\n # Define vertices - xyz.shape[0] is length of vector array\n x = np.zeros(xyz.shape[0])\n y = np.zeros(xyz.shape[0])\n z = np.zeros(xyz.shape[0])\n for i in range(xyz.shape[0]):\n x[i], y[i], z[i] = xyz[i][0], xyz[i][1], xyz[i][2]\n\n # Write to grid\n try:\n pointsToVTK(file_name, x, y, z, data={data_label: vals})\n except Exception as e:\n logger.warning(\"Cannot export volume to VTK file {}: {}\".format(file_name, str(e)))\n return False\n return True", "def _dumpvtk_dumper(dataset):\r\n slf = []\r\n # write the head\r\n slf.append('# vtk DataFile Version 3.0')\r\n slf.append(dataset.title)\r\n slf.append('ASCII')\r\n slf.append('DATASET UNSTRUCTURED_GRID')\r\n # write the points\r\n slf.append('POINTS {} double'.format(len(dataset.points)))\r\n for point in dataset.points:\r\n slf.append('{} {} {}'.format(*point.coordinate))\r\n # write the cells\r\n size = sum([c.cell_size()+1 for c in dataset.cells])\r\n slf.append('CELLS {} {}'.format(len(dataset.cells), size))\r\n for cell in dataset.cells:\r\n slf.append(' '.join(['{:d}'.format(cell.cell_size())] +\r\n ['{:d}'.format(p) for p in cell.points]))\r\n \r\n slf.append('CELL_TYPES {}'.format(len(dataset.cells)))\r\n for cell in dataset.cells:\r\n slf.append('{:d}'.format(cell.cell_type))\r\n # write point data\r\n slf.append('POINT_DATA {}'.format(len(dataset.points)))\r\n for key,field in dataset.point_data.items():\r\n # scalars\r\n if type(field) == ScalarField:\r\n slf.append('SCALARS {} double'.format(field.data_name))\r\n slf.append('LOOKUP_TABLE default')\r\n for d in field.data:\r\n slf.append('{}'.format(d.real))\r\n###############################################################################\r\n# ## Deprecated #\r\n# # vectors #\r\n# elif type(field) == VectorField: #\r\n# slf.append('VECTORS {} double'.format(field.data_name)) #\r\n# for d in field.data: #\r\n# slf.append('{} {} {}'.format(*d)) #\r\n###############################################################################\r\n # vectors (VectorField or Field), use field expression in VTK\r\n else:\r\n slf.append('FIELDS {} 1'.format(key))\r\n slf.append('{} {} {} double'.format(field.data_name,\r\n field.ncomponents, field.size()))\r\n for d in field.data:\r\n slf.append(' '.join(['{}'.format(i.real) for i in d]))\r\n # write cell data\r\n slf.append('CELL_DATA {}'.format(len(dataset.cells)))\r\n for key,field in dataset.cell_data.items():\r\n # scalars\r\n if type(field) == ScalarField:\r\n slf.append('SCALARS {} double'.format(field.data_name))\r\n slf.append('LOOKUP_TABLE default')\r\n for d in field.data:\r\n slf.append('{}'.format(d.real))\r\n###############################################################################\r\n# ## Deprecated #\r\n# # vectors #\r\n# elif type(field) == VectorField: #\r\n# slf.append('VECTORS {} double'.format(field.data_name)) #\r\n# for d in field.data: #\r\n# slf.append('{} {} {}'.format(*d)) #\r\n###############################################################################\r\n # vectors (VectorField 
or Field), use field expression in VTK\r\n else:\r\n slf.append('FIELDS {} 1'.format(key))\r\n slf.append('{} {} {} double'.format(field.data_name,\r\n field.ncomponents, field.size()))\r\n for d in field.data:\r\n slf.append(' '.join(['{}'.format(i.real) for i in d]))\r\n slf.append('')\r\n return '\\n'.join(slf)", "def CopyVtu(inputVtu):\n \n ugrid = vtk.vtkUnstructuredGrid()\n \n # Add the points\n ugrid.SetPoints(inputVtu.ugrid.GetPoints())\n # Add the cells\n ugrid.SetCells(inputVtu.ugrid.GetCellTypesArray(), inputVtu.ugrid.GetCellLocationsArray(), inputVtu.ugrid.GetCells())\n # Add the point data\n for i in range(inputVtu.ugrid.GetPointData().GetNumberOfArrays()):\n ugrid.GetPointData().AddArray(inputVtu.ugrid.GetPointData().GetArray(i))\n # Add the cell data\n for i in range(inputVtu.ugrid.GetCellData().GetNumberOfArrays()):\n ugrid.GetCellData().AddArray(inputVtu.ugrid.GetCellData().GetArray(i))\n \n # Construct output \n result = vtu()\n result.ugrid = ugrid\n \n return result", "def mixed_type_ug():\n points = array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], # tetra\n [2,0,0], [3,0,0], [3,1,0], [2,1,0],\n [2,0,1], [3,0,1], [3,1,1], [2,1,1], # Hex\n ], 'f')\n # shift the points so we can show both.\n points[:,1] += 2.0\n # The cells\n cells = array([4, 0, 1, 2, 3, # tetra\n 8, 4, 5, 6, 7, 8, 9, 10, 11 # hex\n ])\n # The offsets for the cells, i.e. the indices where the cells\n # start.\n offset = array([0, 5])\n tetra_type = tvtk.Tetra().cell_type # VTK_TETRA == 10\n hex_type = tvtk.Hexahedron().cell_type # VTK_HEXAHEDRON == 12\n cell_types = array([tetra_type, hex_type])\n # Create the array of cells unambiguously.\n cell_array = tvtk.CellArray()\n cell_array.set_cells(2, cells)\n # Now create the UG.\n ug = tvtk.UnstructuredGrid(points=points)\n # Now just set the cell types and reuse the ug locations and cells.\n ug.set_cells(cell_types, offset, cell_array)\n return ug", "def __init__(self, filename):\n\n super(UgridReader, self).__init__()\n \n # read UGRID file\n nc = netCDF4.Dataset(filename, 'r')\n\n lats, lons = None, None\n connectivity = None\n for varname in nc.variables:\n var = nc.variables[varname]\n if hasattr(var, 'cf_role') and var.cf_role == 'face_node_connectivity':\n connectivity = var[:]\n elif hasattr(var, 'standard_name'):\n if var.standard_name == 'longitude' and hasattr(var, 'long_name') and var.long_name.find('node') >= 0:\n lons = var[:]\n #print('found longitude: {}'.format(varname))\n elif var.standard_name == 'latitude' and hasattr(var, 'long_name') and var.long_name.find('node') >= 0:\n lats = var[:]\n #print('found latitude: {}'.format(varname))\n\n ncells = connectivity.shape[0]\n\n # construct the unstructured grid as a collection of \n # 2D cells. Each cell has its own cooordinates. 
Make\n # sure each cell's area is positive in lat-lon space\n # build unstructured grid\n\n pointArray = numpy.zeros((4 * ncells, 3))\n self.vtk['pointArray'] = pointArray\n\n pointData = self.vtk['pointData']\n pointData.SetNumberOfComponents(3)\n pointData.SetNumberOfTuples(4 * ncells)\n pointData.SetVoidArray(pointArray, 4 * ncells * 3, 1)\n\n points = self.vtk['points']\n points.SetNumberOfPoints(4 * ncells)\n points.SetData(pointData)\n\n grid = self.vtk['grid']\n grid.Allocate(ncells, 1)\n ptIds = vtk.vtkIdList()\n ptIds.SetNumberOfIds(4)\n halfPeriodicity = self.PERIODICITY_LENGTH/2.\n quarterPeriodicity = self.PERIODICITY_LENGTH/4.\n for icell in range(ncells):\n\n i00, i10, i11, i01 = connectivity[icell, :] - 1 # zero based indexing\n\n lon00, lat00 = lons[i00], lats[i00]\n lon10, lat10 = lons[i10], lats[i10]\n lon11, lat11 = lons[i11], lats[i11]\n lon01, lat01 = lons[i01], lats[i01]\n\n area013 = 0.5*( (lon10 - lon00)*(lat01 - lat00) - (lat10 - lat00)*(lon01 - lon00) )\n area231 = 0.5*( (lon01 - lon11)*(lat10 - lat11) - (lat01 - lat11)*(lon10 - lon11) )\n\n if area013 < 0. or area231 < 0.:\n # this cell straddles the dateline\n # base longitude is lon00, add/remove 2*pi to reduce the cell deltas\n index10 = numpy.argmin([abs(lon10 - self.PERIODICITY_LENGTH - lon00), abs(lon10 - lon00), abs(lon10 + self.PERIODICITY_LENGTH - lon00)])\n index11 = numpy.argmin([abs(lon11 - self.PERIODICITY_LENGTH - lon00), abs(lon11 - lon00), abs(lon11 + self.PERIODICITY_LENGTH - lon00)])\n index01 = numpy.argmin([abs(lon01 - self.PERIODICITY_LENGTH - lon00), abs(lon01 - lon00), abs(lon01 + self.PERIODICITY_LENGTH - lon00)])\n\n lon10 += (index10 - 1) * self.PERIODICITY_LENGTH\n lon11 += (index11 - 1) * self.PERIODICITY_LENGTH\n lon01 += (index01 - 1) * self.PERIODICITY_LENGTH\n\n lts = numpy.array([lat00, lat10, lat11, lat01])\n lns = numpy.array([lon00, lon10, lon11, lon01])\n alts = numpy.fabs(lts)\n if numpy.any(alts[:] == quarterPeriodicity):\n # there is a latitude at the pole. The longitude is not well \n # defined in this case - we can set it to any value. 
For \n # esthetical reason it't good to set it to the average \n # of the longitudes\n i = numpy.argmax(alts - quarterPeriodicity)\n # compute the average lon value, excluding this one\n # and set lns[index] to that value\n avgLon = numpy.sum([lns[(i + 1) % 4], lns[(i + 2) % 4], lns[(i + 3) % 4]]) / 3.\n lns[i] = avgLon\n lon00, lon10, lon11, lon01 = lns\n\n k0 = 4*icell\n k1, k2, k3 = k0 + 1, k0 + 2, k0 + 3 \n\n # storing coords as lon, lat, 0\n pointArray[k0, :] = lon00, lat00, 0.\n pointArray[k1, :] = lon10, lat10, 0.\n pointArray[k2, :] = lon11, lat11, 0.\n pointArray[k3, :] = lon01, lat01, 0.\n\n ptIds.SetId(0, k0)\n ptIds.SetId(1, k1)\n ptIds.SetId(2, k2)\n ptIds.SetId(3, k3)\n grid.InsertNextCell(vtk.VTK_QUAD, ptIds)\n\n\n grid.SetPoints(points)", "def save_vtrs(vtr_basename, varname, fpi_dict, crop_range=[1e-27, 1e-20], stride=10, time_annotation = {'index': [], 'epoch': [], 'time': []}):\n spec = importlib.util.spec_from_file_location('module.name', r'C:/SyA/Projects/iPIC/Python/ipic_utils/vtk_utils.py')\n vtk_utils = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(vtk_utils)\n\n os.makedirs(os.path.split(vtr_basename)[0], exist_ok=True)\n\n dist = fpi_dict['dist']\n epoch = fpi_dict['epoch']\n energy = fpi_dict['energy']\n\n # Phi is the second-to-last dimension\n Yrange = np.linspace(0, np.pi, num=dist.shape[-2])\n # Theta is the last dimension\n Zrange = np.linspace(0, 2*np.pi, num=dist.shape[-1])\n\n for i in range(0, dist.shape[0], stride):\n epch = mu.epoch2int(epoch[i])\n # Energy dimension range\n en = np.log10(energy[i, :])\n vtk_coords, coords = vtk_utils.createVTRCoordinatesIrregular([en, Yrange, Zrange])\n vtr_name = vtr_basename + '_' + ('%10.10i' % epch) + '.vtr'\n\n # Prepare data.\n data = prepare_array(dist[i, :, :, :], crop_range)\n\n # Write\n vtk_utils.writeVTR2(vtr_name, {varname: data}, '', vtk_coords)\n\n # Add time annotation\n time_annotation['index'].append(0 if len(time_annotation['index']) == 0 else time_annotation['index'][-1] + 1)\n time_annotation['epoch'].append(epch)\n time_annotation['time'].append(mu.epoch2time(epoch[i]).isoformat())", "def write_to_vtk(conf, ar, filename):\n params = DispalyParams(conf)\n\n if params.crop is None:\n dims = ar.shape\n arr_cropped = ar.ravel()\n else:\n dims = params.crop\n arr_cropped = crop_array_center(ar, params.crop).ravel()\n\n amps = np.abs(arr_cropped)\n phases = np.arctan2(arr_cropped.imag, arr_cropped.real)\n\n geometry = params.get_geometry()\n coordinates = get_coords(dims, geometry)\n\n sg = tvtk.StructuredGrid()\n sg.points = coordinates\n sg.dimensions = (dims[2], dims[1], dims[0])\n sg.extent = 0, dims[2] - 1, 0, dims[1] - 1, 0, dims[0] - 1\n if params.save_two_files:\n sg.point_data.scalars = amps\n sg.point_data.scalars.name = \"Amp\"\n write_array(sg, filename + \"_Amp.vtk\")\n\n sg.point_data.scalars = phases\n sg.point_data.scalars.name = \"Phase\"\n write_array(sg, filename + \"_Phase.vtk\")\n else:\n sg.point_data.scalars = amps\n sg.point_data.scalars.name = \"Amp\"\n ph = tvtk.FloatArray()\n ph.from_array(phases)\n ph.name = \"Phase\"\n sg.point_data.add_array(ph)\n\n\n write_array(sg, filename + \".vtk\")", "def _write_cubeface_evtk(model, file_name, data_label, nsteps, real_coords=True):\n # Evaluate model at points\n points, tri = create_box(model.bounding_box, nsteps)\n val = model.evaluate_model(points, scale=False)\n if real_coords:\n model.rescale(points)\n\n # Define vertices\n x = np.zeros(points.shape[0])\n y = np.zeros(points.shape[0])\n z = 
np.zeros(points.shape[0])\n for i in range(points.shape[0]):\n x[i], y[i], z[i] = points[i][0], points[i][1], points[i][2]\n\n # Define connectivity or vertices that belongs to each element\n conn = np.zeros(tri.shape[0] * 3)\n for i in range(tri.shape[0]):\n conn[i*3], conn[i*3+1], conn[i*3+2] = tri[i][0], tri[i][1], tri[i][2]\n\n # Define offset of last vertex of each element\n offset = np.zeros(tri.shape[0])\n for i in range(tri.shape[0]):\n offset[i] = (i+1)*3\n\n # Define cell types\n ctype = np.full(tri.shape[0], VtkTriangle.tid)\n\n try:\n unstructuredGridToVTK(file_name, x, y, z, connectivity=conn, offsets=offset, cell_types=ctype,\n cellData=None, pointData={data_label: val})\n except Exception as e:\n logger.warning(\"Cannot export cuboid surface to VTK file {}: {}\".format(file_name, str(e)))\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a query string from a dictionary
def createQueryString(query_dict):
    query_string = ''
    # e.g. (id == 1) | (id == 2) | (id == 3) | (id == 4)
    for k, l in query_dict.iteritems():
        for v in l:
            query_string += '({0}=={1})|'.format(k,v)
    query_string = query_string[:-1]
    return query_string
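A quick illustration of the input and output shape; the cell IDs are made up, and note that each dictionary value must be an iterable of values (the function is Python 2 style code, hence iteritems):

query_dict = {'id': [1, 2, 3, 4]}
query_string = createQueryString(query_dict)
# query_string == '(id==1)|(id==2)|(id==3)|(id==4)'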
[ "def generate_query_string(query_dict):\n if isinstance(query_dict, str):\n return query_dict\n q = ''.join(\"{}={}&\".format(k, v) for k, v in query_dict.items() if v)\n return q[:-1]", "def _dict2query_string(query_params, sort=False):\n\n query_params = query_params.copy()\n\n # Convert parameters names and values to UTF-8 and escape them\n for key, value in query_params.items():\n del query_params[key]\n key = urllib.quote((unicode(key).encode(\"utf-8\")), safe='~')\n value = urllib.quote((unicode(value).encode(\"utf-8\")), safe='~')\n query_params[key] = value\n\n if sort:\n return '&'.join(['%s=%s' % (key, value) for key, value \\\n in sorted(query_params.items())])\n else:\n return '&'.join(['%s=%s' % (key, value) for key, value \\\n in query_params.items()])", "def build_query_string(params, order_by_key=False):\n if not isinstance(params, dict):\n params = dict(params)\n if order_by_key:\n params = sorted(params.items(), key=lambda val: val[0])\n return urllib.urlencode(params)", "def query_string(self, replace={}, original=None):\n # Based on code by Grégoire Weber\n if original is None:\n query = self.REQUEST.form.copy()\n else:\n query = original.copy()\n\n # delete key/value pairs if value is None\n for k,v in replace.items():\n if v is None:\n if query.has_key(k):\n del query[k]\n del replace[k]\n\n # update dictionary\n query.update(replace)\n qs = '&'.join([\"%s=%s\" % (quote_plus(str(k)), quote_plus(str(v)))\n for k,v in query.items()])\n\n return qs", "def urlnoencode(query):\n l = []\n arg = \"%s=%s\"\n\n if hasattr(query, \"items\"):\n # mapping objects\n query = list(query.items())\n\n for k, v in query:\n l.append(arg % (k, v))\n\n return \"&\".join(l)", "def _encode_query(items: dict, *, mask=False) -> str:\n pairs = []\n for key in sorted(items.keys()):\n value = _MASK if mask and key in _MASKED_PARAMS else items[key]\n item = \"{}={}\".format(key, _quote(value))\n # Ensure 'url' goes last per CLI spec\n if key == \"url\":\n pairs.append(item)\n else:\n pairs.insert(0, item)\n return \"&\".join(pairs)", "def to_query_str(params):\n\n if not params:\n return ''\n\n # PERF: This is faster than a list comprehension and join, mainly\n # because it allows us to inline the value transform.\n query_str = '?'\n for k, v in params.items():\n if v is True:\n v = 'true'\n elif v is False:\n v = 'false'\n elif isinstance(v, list):\n v = ','.join(map(str, v))\n else:\n v = str(v)\n\n query_str += k + '=' + v + '&'\n\n return query_str[:-1]", "def URLEncodeQuery(**kwargs):\n for kwarg in kwargs:\n kwargs[kwarg] = quote(str(kwargs[kwarg]))\n return kwargs", "def urlencode(query,doseq=0):\r\n\r\n if hasattr(query,\"items\"):\r\n # mapping objects\r\n query = query.items()\r\n else:\r\n # it's a bother at times that strings and string-like objects are\r\n # sequences...\r\n try:\r\n # non-sequence items should not work with len()\r\n # non-empty strings will fail this\r\n if len(query) and not isinstance(query[0], tuple):\r\n raise TypeError\r\n # zero-length sequences of all types will get here and succeed,\r\n # but that's a minor nit - since the original implementation\r\n # allowed empty dicts that type of behavior probably should be\r\n # preserved for consistency\r\n except TypeError:\r\n ty,va,tb = sys.exc_info()\r\n raise TypeError, \"not a valid non-string sequence or mapping object\", tb\r\n\r\n l = []\r\n if not doseq:\r\n # preserve old behavior\r\n for k, v in query:\r\n k = quote_plus(str(k))\r\n v = quote_plus(str(v))\r\n l.append(k + '=' + v)\r\n else:\r\n for k, v 
in query:\r\n k = quote_plus(str(k))\r\n if isinstance(v, str):\r\n v = quote_plus(v)\r\n l.append(k + '=' + v)\r\n elif _is_unicode(v):\r\n # is there a reasonable way to convert to ASCII?\r\n # encode generates a string, but \"replace\" or \"ignore\"\r\n # lose information and \"strict\" can raise UnicodeError\r\n v = quote_plus(v.encode(\"ASCII\",\"replace\"))\r\n l.append(k + '=' + v)\r\n else:\r\n try:\r\n # is this a sufficient test for sequence-ness?\r\n x = len(v)\r\n except TypeError:\r\n # not a sequence\r\n v = quote_plus(str(v))\r\n l.append(k + '=' + v)\r\n else:\r\n # loop over the sequence\r\n for elt in v:\r\n l.append(k + '=' + quote_plus(str(elt)))\r\n return '&'.join(l)", "def test_building_query_string(self):\n params = insightiq_api.Parameters({'one': 1}, {'one': 'foo'}, {'two': 'bar'})\n tmp = []\n for param, value in params.items():\n tmp.append('%s=%s' % (param, value))\n query = '&'.join(tmp)\n expected = 'one=1&one=foo&two=bar'\n\n self.assertEqual(query, expected)", "def query_string(cls, **params):\n if params:\n params = cls._transform_params(params)\n return '?%s' % urlencode(params)\n return ''", "def _buildParams(userInput):\n\tparams = \"?\"\n\tfor key, value in userInput.items():\n\t\tif value != None:\n\t\t\tparams += \"%s=%s&\" % (key, quote(str(value)))\n\treturn params", "def _formatQuery(self, query_dict):\n pass", "def _format_query_string(self, query_string):\n query_parts = query_string.split('&')\n data = {}\n for query in query_parts:\n try:\n key, value = query.split('=')\n data[key] = value\n except ValueError:\n pass\n\n return data", "def urlencode(query):\n delkeys = []\n for key, val in query.iteritems():\n if val is None:\n delkeys.append(key)\n for key in delkeys:\n query.pop(key)\n return urllib.urlencode(query)", "def make_query_params(**request_params):\n\n params = {}\n for key, value in six.iteritems(request_params):\n if value is not None:\n params[key] = value\n\n return params", "def urlencode(query, doseq=False, safe='', encoding=None, errors=None):\n\n if hasattr(query, \"items\"):\n query = query.items()\n else:\n # It's a bother at times that strings and string-like objects are\n # sequences.\n try:\n # non-sequence items should not work with len()\n # non-empty strings will fail this\n if len(query) and not isinstance(query[0], tuple):\n raise TypeError\n # Zero-length sequences of all types will get here and succeed,\n # but that's a minor nit. 
Since the original implementation\n # allowed empty dicts that type of behavior probably should be\n # preserved for consistency\n except TypeError:\n# ty, va, tb = sys.exc_info()\n raise TypeError(\"not a valid non-string sequence \"\n \"or mapping object\")#.with_traceback(tb)\n\n l = []\n if not doseq:\n for k, v in query:\n if isinstance(k, bytes):\n k = quote_plus(k, safe)\n else:\n k = quote_plus(str(k), safe, encoding, errors)\n\n if isinstance(v, bytes):\n v = quote_plus(v, safe)\n else:\n v = quote_plus(str(v), safe, encoding, errors)\n l.append(k + '=' + v)\n else:\n for k, v in query:\n if isinstance(k, bytes):\n k = quote_plus(k, safe)\n else:\n k = quote_plus(str(k), safe, encoding, errors)\n\n if isinstance(v, bytes):\n v = quote_plus(v, safe)\n l.append(k + '=' + v)\n elif isinstance(v, str):\n v = quote_plus(v, safe, encoding, errors)\n l.append(k + '=' + v)\n else:\n try:\n # Is this a sufficient test for sequence-ness?\n x = len(v)\n except TypeError:\n # not a sequence\n v = quote_plus(str(v), safe, encoding, errors)\n l.append(k + '=' + v)\n else:\n # loop over the sequence\n for elt in v:\n if isinstance(elt, bytes):\n elt = quote_plus(elt, safe)\n else:\n elt = quote_plus(str(elt), safe, encoding, errors)\n l.append(k + '=' + elt)\n return '&'.join(l)", "def writeget(dict):\n\turl='?'\n\tfor item in dict:\n\t\turl += str(item) + '=' + str(dict[item]) + '&'\n\tif len(dict):\n\t\treturn url[:-1]\t\n\telse:\n\t\treturn url", "def _stringify_search_params(self, params):\n # Don't create side effects on the dictionary passed in.\n params = params.copy()\n\n query_string = '' # final string to submit to search API\n type_strings = [] # e.g. '(tag:foo OR tag:bar)'\n\n # Everything except the \"plain\" search text needs to be labeled with\n # a field, so take it out and process the rest.\n query_string += params.pop('q', '')\n\n # Removes bad characters from the string using regex\n query_string = re.sub('[(){}<>]', '', query_string)\n\n query_string = query_string.replace(' ', ' AND ')\n\n # Max and min grades are handled separately because they're turned into\n # inequalities.\n min_grade = params.pop('min_grade', None)\n max_grade = params.pop('max_grade', None)\n\n # Test for overlap between the search requested and the max/min of\n # the content we've got.\n if min_grade is not None:\n type_strings.append('max_grade >= {}'.format(min_grade))\n if max_grade is not None:\n type_strings.append('min_grade <= {}'.format(max_grade))\n\n for field_name in params:\n # Make everything else a list.\n values = params[field_name]\n if type(values) is not list:\n values = [values]\n if len(values) > 0: # protect against empty lists\n value_strings = [] # e.g. 'tag:foo'\n for value in values:\n formatted_value = '\\\"' + value.lower() + '\\\"'\n value_strings.append(field_name + ': ' + formatted_value)\n type_strings.append('(' + ' OR '.join(value_strings) + ')')\n\n query_string = ' AND '.join(type_strings) + ' ' + query_string\n\n logging.info(\"Searching for:\")\n logging.info(query_string)\n\n return query_string" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the Cell IDs of a well in Paraview. Creates a query string that can be copied to Paraview's find function.
def findWellBlocks(well_name, welltracks_df, vtr_file):
    grid = readVTK(vtr_file)
    ids = findIntersectedBlocks(well_name, welltracks_df, grid)
    query_dict = {'id':ids}
    return createQueryString(query_dict)
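A hedged usage sketch: readVTK and findIntersectedBlocks are helpers from the same module that are not shown here, and the well name and .vtr file path are placeholders:

selection_query = findWellBlocks('W-1', welltracks, 'reservoir_grid.vtr')
# e.g. '(id==17)|(id==18)|(id==42)', ready to paste into ParaView's
# Find Data / selection query dialog.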
[ "def find_wells(input_text, **kwargs):\n session = get_global_session()\n return session.find_wells(input_text, **kwargs)", "def _index_of_cell(self, query):\n x = 0\n for table in self.doc.tables:\n y = 0\n for row in table.rows:\n z = 0\n for cell in row.cells:\n if query.strip().lower() in cell.text.lower():\n return (x, y, z)\n z += 1\n y += 1\n x += 1", "def retrieveCellIds(cls, listOfPoints):", "def _where(self):\n result = []\n result.extend(self._partition_selector())\n result.extend(self._job_and_fuzzer_selector())\n\n result = ' AND '.join(result)\n if result:\n return 'WHERE ' + result\n\n return ''", "def row_and_col_where(self, where=None):\n row_idxs_where = self.row_idxs_where(where)\n if where is None:\n return row_idxs_where\n row_idxs = []\n for row_idx in row_idxs_where:\n if len(self.col_names_where(row_idx, where)) == len(where):\n row_idxs.append(row_idx)\n return row_idxs", "def find(self):\n sql_cols = \"tokenid, token, lemma, pos, feat, head, deprel, align_id, id, sentence_id, text_id, contr_deprel, contr_head\"\n sqlq = \"SELECT {0} FROM {1} WHERE align_id in ({2}) order by align_id, id\".format(sql_cols, Db.searched_table, self.subquery)\n wordrows = Db.con.dictquery(sqlq,self.subqueryvalues)\n print('Analyzing...')\n if wordrows:\n self.pickFromAlign_ids(wordrows)\n if self.isparallel:\n self.FindParallelSegmentsAfterwards()\n else:\n return input('Nothing found..')", "def params(rows=10,page=1):\n return 'searchString=&searchField=&searchOper=&_search=true&nd=1320094066485&rows=%s&page=%s&sidx=CertifyingAgent&sord=asc' % (str(rows),str(page))", "def findRoom(soup, sessionObj, _idNumber, _room_number):\n\n\t# Parse the nested tables for the id assoc. with target room\n\tpotentialRooms = soup.findAll('span', id=re.compile('ctl00_ContentPlaceHolder1_gvTimeSlots_ct.+'))\n\tprint potentialRooms\n\n\n\tsys.exit(0)\n\t\n\t# If room not found; does not exist or already reserved\n\tif ROOM_TAG == None:\t\n\t\traise Exception(\"[ERROR] Room %s @ %s Not Found\" % (_room_number, targetRoom))\n\n\t# Find the hidden variables required in request\n\tviewState = soup.find('input', { 'name': '__VIEWSTATE' }).get('value')\n\tviewStateGen = soup.find('input', { 'name': '__VIEWSTATEGENERATOR' }).get('value')\n\teventValidation = soup.find('input', { 'name': '__EVENTVALIDATION' }).get('value')\n\n\tindex = ROOM_TAG.rfind('LnkBtn_') + len('LnkBtn_')\n\tclickedLinkButtonValue = ROOM_TAG[index:]\n\t\n\t__EVENTTARGET = ROOM_TAG.replace('_', '$', 2)\n\n\tpackage = {'__EVENTTARGET': __EVENTTARGET, '__EVENTARGUMENT': '', 'ctl00$hidCardno': _idNumber,\n\t\t'ctl00$hidGoogleMapKey': 'ABQIAAAAJKUVL-MrwDN5PN4e9ptZlRT2yXp_ZAY8_ufC3CFXhHIE1NvwkxTptz2NMSRojYVwzZ2DgnujQSVluA', 'ctl00$hidGoogleMapZoomLevel': '12',\n\t\t'ctl00$hidGoogleMapLat': '49.244654', 'ctl00$hidGoogleMapLng': '-122.970657', 'ctl00$hidEnableGoogeMapScript': 'x', \n\t\t'ctl00$ContentPlaceHolder1$hidClickedLinkButtonValue': clickedLinkButtonValue, 'ctl00$ContentPlaceHolder1$hid_PoolMachineDisplayName': 'To be determined', \n\t\t'__VIEWSTATE': viewState, '__VIEWSTATEGENERATOR': viewStateGen, '__EVENTVALIDATION': eventValidation \n\t\t}\n\n\tresponse = sessionObj.post(TIME_SLOTS_URL, data=package, headers=HEADERS, cookies=sessionObj.cookies)\n\treceipt = BeautifulSoup(response.text, 'html.parser')\n\n\tif response.status_code == requests.codes.ok:\n\t\treturn True\n\n\treturn False", "def find_parameter(self, text, col):\r\n\r\n df = self.parameters\r\n findtext = df[df[col].str.contains(text, regex=True) == True].values\r\n 
return findtext", "def test_find_cell_id(self):\n pnt = Point(0, 0, 1)\n out = IntPoint(0, 0, 0)\n\n out = py_find_cell_id(pnt, 1.0)\n self.assertEqual(out.x, 0)\n self.assertEqual(out.y, 0)\n self.assertEqual(out.z, 1)\n\n pnt.x = -2.01\n out = py_find_cell_id(pnt, 1.0)\n self.assertEqual(out.x, -3)\n self.assertEqual(out.y, 0)\n self.assertEqual(out.z, 1)\n\n pnt.y = -1.01\n out = py_find_cell_id(pnt, 1.0)\n self.assertEqual(out.x, -3)\n self.assertEqual(out.y, -2)\n self.assertEqual(out.z, 1)", "def finddataid(db,datamd5='',mapmd5='',getall=False):\n\tDebug(1,'findexpid for datamd5 %s mapmd5 %s' % (datamd5,mapmd5))\n\tdetails=[]\n\tif datamd5:\n\t\tdetails.append(['DataMD5',datamd5])\n\tif mapmd5:\n\t\tdetails.append(['MapMD5',mapmd5])\n\tif len(details)==0:\n\t\tDebug(6,'Error. MapMD5 and DataMD5 both missing from finddataid')\n\t\treturn None\n\n\trdata={}\n\trdata['details']=details\n\tres=requests.get(db.dburl+'/experiments/get_id',json=rdata)\n\tif res.status_code==200:\n\t\texpids=res.json()['expId']\n\t\tif not getall:\n\t\t\tif len(expids)>1:\n\t\t\t\tDebug(6,'Problem. Found %d matches for data' % len(expids))\n\t\t\tDebug(2,'Found study id %d' % expids[0])\n\t\t\treturn expids[0]\n\t\tDebug(2,\"Found %d matches to data\" % len(expids))\n\t\treturn expids\n\tDebug(8,'Error getting expid from details')\n\treturn None", "def find_row_col_wells(self):\n\n # number of pixels within which found wells are considered to be within the same row\n if self.well_shape == 'circle':\n interval = self.wells['r'].mean() # average radius across circles (3rd column)\n elif self.well_shape == 'square':\n interval = self.wells['r'].mean() # this is just half the template size anyway\n # maybe change that?\n\n # execute same loop for both rows and columns\n for d,lp in zip(['x','y'],['col','row']): # d = dimension, lp = lattice place\n # initialise array or row/column labels. This is a temporary variable, I could have just used self.wells[lp]\n d_ind = np.full(self.wells.shape[0],np.nan)\n cc = 0; # what label are we assigning right now\n # loop until all the labels have been assigned\n while any(np.isnan(d_ind)):\n # find coordinate of first (leftmost or topmost) non-labeled well\n idx_unlabelled_wells = np.isnan(d_ind)\n unlabelled_wells = self.wells.loc[idx_unlabelled_wells]\n coord_first_well = np.min(unlabelled_wells[d])\n # find distance between this and *all* wells along the considered dimension\n d_dists = self.wells[d] - coord_first_well;\n # find wells within interval. 
d_dists>=0 discards previous rows [columns]\n # could have taken the absolute value instead but meh I like this logic better\n idx_same = np.logical_and((d_dists >= 0),(d_dists < interval))\n # doublecheck we are not overwriting an existing label:\n # idx_same should point to positions that are still nan in d_ind\n if any(np.isnan(d_ind[idx_same])==False):\n pdb.set_trace()\n elif not any(idx_same): # if no wells found within the interval\n pdb.set_trace()\n else:\n # assign the row [col] label to the wells closer than\n # interval to the topmost [leftmost] unlabelled well\n d_ind[idx_same] = cc\n # increment label\n cc+=1\n # end while\n # assign label array to right dimension\n self.wells[lp] = d_ind.astype(int)\n\n # checks: if 24 wells => 4 entries only, if 48 either 3x3 or 3x2\n if self.n_wells == 24:\n _is_2x2 = self.wells.shape[0] == 4 and \\\n self.wells.row.max() == 1 and \\\n self.wells.col.max() == 1\n if not _is_2x2:\n self.plot_wells()\n raise Exception(\"Found wells not in a 2x2 arrangement, results are unreliable\");\n elif self.n_wells == 48:\n _is_3x2 = self.wells.shape[0] == 6 and \\\n self.wells.row.max() == 2 and \\\n self.wells.col.max() == 1\n _is_3x3 = self.wells.shape[0] == 9 and \\\n self.wells.row.max() == 2 and \\\n self.wells.col.max() == 2\n if not (_is_3x2 or _is_3x3):\n self.plot_wells()\n raise Exception(\"Found wells not in a 3x2 or 3x3 arrangement, results are unreliable\");\n elif self.n_wells == 96:\n _is_4x4 = self.wells.shape[0] == 16 and \\\n self.wells.row.max() == 3 and \\\n self.wells.col.max() == 3\n if not _is_4x4:\n self.plot_wells()\n raise Exception(\"Found wells not in a 4x4 arrangement, \"\n + \"results are unreliable\");\n return", "def findId(page=''):\n\tif page:\n\t\tm = re.findall(r'{{soccerway\\s*\\|([A-Za-zÀ-ÖØ-öø-ÿ\\-]+\\/\\d+)', page.text, re.IGNORECASE)\n\t\tif m:\n\t\t\treturn m[0]\n\t\tm = re.findall(r'{{soccerway\\s*\\|id=([A-Za-zÀ-ÖØ-öø-ÿ\\-]+\\/\\d+)', page.text, re.IGNORECASE)\n\t\tif m:\n\t\t\treturn m[0]\n\telse:\n\t\tprint('Error in retrieving information from article.\\n')\n\treturn ''", "def ids_query(doc_ids):\n return {'query': ids_selector(doc_ids)}", "def bin_search_id(boarding_pass: str) -> int:\n rows = boarding_pass[:7]\n seats = boarding_pass[7:]\n low, high = 0, 127\n for step in rows:\n if step == \"F\": # lower half\n high = (high + low) // 2\n elif step == \"B\": # upper half\n low = ((high + low) // 2) + 1\n left, right = 0, 7\n for seat in seats:\n if seat == \"L\": # lower half\n right = (left + right) // 2\n elif seat == \"R\": # upper half\n left = ((left + right) // 2) + 1\n return (low * 8) + left", "def search_annotDB(annotDB, pwm, reportLocations=False):\n for id, annot in annotDB.iteritems():\n motifHits = pwm.find_in_region(annot.sequence)\n if reportLocations:\n yield id, motifHits\n else:\n yield id", "def find_ids(self, regexp, param, provider_ref=None):\n\n if param not in ['ref', 'ref_provider']:\n raise ValueError(\"Param should equal either 'ref' or 'ref_provider'\")\n\n found_ids = []\n station = self.db_conn.get_table('station')\n provider = self.db_conn.get_table('provider')\n query = select([station.c.id,\n station.c.ref,\n station.c.ref_provider,\n label('provider_ref', provider.c.ref)],\n station.c.provider_id==provider.c.id)\n with self.db_conn.trans() as trans:\n values = trans.get_data(query)\n\n for item in values:\n if provider_ref and provider_ref != item['provider_ref']:\n continue\n if re.match(regexp, item[param]):\n found_ids.append(item['id'])\n return found_ids", "def 
_tag_ivar_well_cells(_, gb: pp.GridBucket) -> None:\n box = gb.bounding_box(as_dict=True)\n nd = gb.dim_max()\n for g, d in gb:\n tags = np.zeros(g.num_cells)\n if g.dim < nd:\n point = np.array(\n [\n [(box[\"xmin\"] + box[\"xmax\"]) / 2],\n [box[\"ymax\"]],\n [0],\n ]\n )\n distances = pp.distances.point_pointset(point, g.cell_centers)\n indexes = np.argsort(distances)\n if d[\"node_number\"] == 1:\n tags[indexes[-1]] = 1 # injection\n elif d[\"node_number\"] == 3:\n tags[indexes[-1]] = -1 # production\n # write_well_cell_to_csv(g, indexes[-1], self)\n g.tags[\"well_cells\"] = tags\n pp.set_state(d, {\"well\": tags.copy()})", "def query(identifiers, debug=False, closest=False, around=0):\n global simbad_site, dbfile\n \n if not isinstance(identifiers, list): # make a scalar into a list\n identifiers = [identifiers]\n if closest and len(identifiers)>1:\n print 'ID:', identifiers, len(identifiers)\n print 'closest=True only for single query...'\n return \n \n ngroup = 40 # max number of objects to query at the same time\n if len(identifiers)>ngroup: # slice list\n # group by ngroup\n res = []\n while len(identifiers)>0:\n #print len(identifiers), 'objects to query'\n res.extend(query(identifiers[:ngroup], debug=debug, closest=closest,\n around=around))\n identifiers = identifiers[ngroup:]\n return res\n \n ###########################################################\n ### from here, assumes identifiers is a list of strings ### ###########################################################\n \n # check if it is in the DataBase\n if os.path.isfile(dbfile) and around==0:\n dbf = open(dbfile)\n db = cPickle.load(dbf)\n dbf.close()\n res = []\n for i in identifiers:\n if db.has_key(i):\n #print i, 'found in local DB'\n res.append(db[i])\n else:\n #print i, 'NOT found in local DB'\n res.append({})\n else:\n res = [{} for i in identifiers]\n \n # -- all target found in database\n if all([r.has_key('IDENTIFIER') for r in res]):\n return res\n \n rt_ = '%0D%0A' # cariage return\n plus_ = '%2B' # + in the URL\n separator = ';'\n format_ = \"format+object+form1+\\\"\"+separator+\"+%25IDLIST(1)+\"+separator+\"+%25COO(A+D)+\"+separator+\"+%25OTYPE+\"+separator+\"+%25SP+\"+separator+\"+%25PM(A+D)+\"+separator+\"+%25PLX(V+E)+\"+separator+\"+%25FLUXLIST(B)+\"+separator+\"+%25FLUXLIST(V)+\"+separator+\"+%25FLUXLIST(R)+\"+separator+\"+%25FLUXLIST(J)+\"+separator+\"+%25FLUXLIST(H)+\"+separator+\"+%25FLUXLIST(K)+\"+separator+\"+%25MEASLIST(rot;|F)+\"+separator+\"%25MEASLIST(iras;|F)\"+separator+\"%25MEASLIST(JP11;|F)\"+\"\\\"\"\n\n url = 'simbad/sim-script?submit=submit+script&script='+format_\n \n Nquery = 0\n IDquery = []\n for k,i in enumerate(identifiers):\n if not res[k].has_key('IDENTIFIER'):\n Nquery+=1\n IDquery.append(i)\n obj = i.replace('+', plus_)\n obj = obj.replace('_', ' ')\n obj = obj.replace(' ', '+')\n if ':' in i: # these must be coordinates!\n url = url+rt_+'query+coo+'+obj+'+radius%3D5s'\n elif around>0:\n url = url+rt_+'query+around+'+obj+'+radius%3D'+str(around)+'m'\n else:\n url = url+rt_+'query+id+'+obj\n\n if debug:\n print simbad_site+url\n try:\n lines = urllib2.urlopen(simbad_site+url, timeout=20).read()\n except:\n simbad_site = alternate_site\n print 'switching to alternate server...'\n try:\n lines = urllib2.urlopen(simbad_site+url, timeout=20).read()\n except:\n raise NameError('servers do not respond OR no internet connection')\n \n if debug:\n print lines\n lines = lines.split('\\n')\n\n # go to data\n for k, l in enumerate(lines): \n if ':error:' in l:\n #print ' ERROR:', 
lines[k+2]\n #print '------------------------------'\n #print lines\n return None\n if ':data:' in l:\n lines = lines[k+1:]\n break\n\n lines = filter(lambda x: len(x)>0, lines)\n\n if len(lines)!=Nquery and not closest and around==0:\n print ' ERROR: too many/few results!'\n return None\n \n if debug:\n print lines\n\n # read every line which is a different object \n for k, l in enumerate(lines):\n obj = {}\n if around>0:\n obj['IDENTIFIER'] = 'around: '+identifiers[0]\n else:\n obj['IDENTIFIER'] = IDquery[k]\n \n obj['NAME'] = l.split(separator)[1].strip() \n\n if '-' in l.split(separator)[2]:\n l_ra = l.split(separator)[2].split('-')[0]\n l_dec = '-'+l.split(separator)[2].split('-')[1]\n else:\n l_ra = l.split(separator)[2].split('+')[0]\n l_dec = '+'+l.split(separator)[2].split('+')[1]\n\n obj['RA'] = l_ra.strip()\n obj['DEC'] = l_dec.strip()\n \n if len(l_ra.split())==3:\n obj['RA.h'] = (float(l_ra.split()[0])+\n float(l_ra.split()[1])/60.+\n float(l_ra.split()[2])/3600.)\n elif len(l_ra.split())==2:\n obj['RA.h'] = (float(l_ra.split()[0])+\n float(l_ra.split()[1])/60.)\n else:\n obj['RA.h'] = float(l_ra.split()[0])\n \n obj['RA D'] = obj['RA.h']*15\n \n if len(l_dec.split())==3:\n obj['DEC.d'] = abs(float(l_dec.split()[0]))+\\\n float(l_dec.split()[1])/60.+\\\n float(l_dec.split()[2])/3600.\n elif len(l_dec.split())==2:\n obj['DEC.d'] = abs(float(l_dec.split()[0]))+\\\n float(l_dec.split()[1])/60.\n else:\n obj['DEC.d'] = abs(float(l_dec.split()[0]))\n \n obj['DEC.d'] = math.copysign(obj['DEC.d'],\n float(l_dec.split()[0]))\n \n # 15th Jan at midnight is ~ LST 6:00\n obj['TRANSIT MONTH'] = int(round((obj['RA.h']-6.00)/2.-1, 0))%12+1\n obj['TYPE'] = l.split(separator)[3].split('~')[0].strip()\n obj['SPTYPE'] = l.split(separator)[4].strip().split()[0]\n\n try:\n obj['PMA'] = float(l.split(separator)[5].split()[0])/1000.\n obj['PMD'] = float(l.split(separator)[5].split()[1])/1000.\n except:\n obj['PMA'] = 0.0\n obj['PMD'] = 0.0\n\n try:\n obj['PLX'] = float(l.split(separator)[6].split()[0])/1000.\n obj['EPLX'] = float(l.split(separator)[6].split()[1])/1000.\n except:\n obj['PLX'] = 0.0\n obj['EPLX'] = 0.0\n\n mags = ['B','V','R','J','H','K']\n for j, m in enumerate(mags):\n try:\n obj[m+'MAG'] = float(l.split(separator)[7+j].split()[1])\n except:\n try:\n # take first number\n tmp = l.split(separator)[7+j]\n for i in range(len(tmp)):\n if tmp[i].isdigit(): \n break\n obj[m+'MAG'] = float(tmp[i:].split()[0])\n except:\n obj[m+'MAG'] = np.nan\n try:\n obj['VSINI'] = float(l.split(separator)[13].split('|')[0].split()[0])\n except:\n obj['VSINI'] = -1 # failed\n iras_wl = ['12um', '24um', '60um', '100um']\n \n obj['IRAS'] = dict(zip(iras_wl, np.zeros(len(iras_wl)))) \n for i,j in enumerate(iras_wl):\n try:\n obj['IRAS'][j] = float(l.split(separator)[14].split('|')[i].split()[0])\n except:\n obj['IRAS'][j] = np.nan\n \n JP11_wl = ['U', 'B', 'V', 'R', 'I', 'J', 'K', 'L', 'M', 'N', 'H']\n obj['JP11'] = dict(zip(JP11_wl, np.zeros(len(JP11_wl)))) \n for i,j in enumerate(JP11_wl):\n try:\n obj['JP11'][j] = float(l.split(separator)[15].split('|')[i].split()[0])\n except:\n obj['JP11'][j] = np.nan\n if np.isnan(obj['KMAG']) and not np.isnan(obj['JP11']['K']):\n obj['KMAG']= obj['JP11']['K']\n \n \n res[identifiers.index(IDquery[k])] = obj\n if closest:\n break\n \n if around>0:\n for k in range(len(res)):\n res[k]['DIST D'] = math.sqrt( (res[0]['DEC.d']-res[k]['DEC.d'])**2+\n math.cos(res[0]['DEC.d']*3.1415/180)**2*\n (res[0]['RA D']-res[k]['RA D'])**2)\n res[k]['DIST S'] = res[k]['DIST D']*3600 \n 
res = addApproxDiam(res, verbose=False)\n \n if around==0:\n try:\n if not isinstance(db, dict):\n db = {}\n except:\n db = {}\n \n for k,i in enumerate(IDquery):\n db[i]= res[k]\n \n dbf = open(dbfile, 'w')\n cPickle.dump(db, dbf, 2)\n dbf.close()\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete Buffer Object and any associated query object
def delete(self):
    if self.__buffer__ is not None:
        glDeleteBuffers(1,[self.__buffer__])
        self.__buffer__=None
    if self.__query__ is not None:
        glDeleteQueries(1, [self.__query__])
        self.__query__=None
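A small usage sketch; the owning class here is hypothetical, the point being that the None guards make the method idempotent, so it can sit on both an explicit cleanup path and a __del__/atexit-style hook without double-freeing the GL handles:

buf = MeshBuffer()   # hypothetical class exposing delete() as above
try:
    # ... upload vertex data, issue an occlusion query, draw ...
    pass
finally:
    buf.delete()     # releases the GL buffer object and query object

buf.delete()         # second call is a no-op: both handles are already None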
[ "def delete(obj):", "def purge(self):\n\tif self.isReferenced():\n\t log.warning('This function is not designed for referenced buffer nodes')\n\t return False\n\t\n userAttrs = mc.listAttr(self.mNode,userDefined = True) or []\n for attr in userAttrs:\n if 'item_' in attr:\n attributes.doDeleteAttr(self.mNode,attr)\n #log.debug(\"Deleted: '%s.%s'\"%(self.mNode,attr)) \n \n self.l_buffer = []\n self.d_buffer = {}", "def delete(self):\n self.device_buffer.delete() # pytype: disable=attribute-error\n self.device_buffer = deleted_buffer\n self._npy_value = None", "def delete_request():", "def clearBuffers(self) -> None:\n ...", "def delete_record():", "def delete(self):\n failed, model, entity = self._get_model_and_entity(True, True)\n if failed: return\n entity.delete()\n self._serve({})", "def delete(persistent_query, user):\n\n persistent_query.delete()", "def delete(obj: ModelItem) -> None:\n mongo_obj = from_brewtils(obj)\n\n if hasattr(mongo_obj, \"deep_delete\"):\n mongo_obj.deep_delete()\n else:\n mongo_obj.delete()", "def _DelObject(self, _id):\n if self._vbo is None:\n return\n index = self._indices[_id]\n num_values = type(self).__num_values\n del self._indices[_id]\n if not (index == self._max_index):\n self._empty_indices.append(index)\n else:\n self._max_index -= 1\n self._vbo[\n index * num_values:(index + 1) * num_values] = nzeros(num_values, \"f\")", "def remove(self, obj_or_id):\n\t\tif type(obj_or_id) is str or type(obj_or_id) == unicode:\n\t\t\tdel self.connection[obj_or_id]\n\t\telse:\n\t\t\tif not getattr(obj_or_id, \"id\", None):\n\t\t\t\traise Exception(\"unknown object\", obj_or_id)\n\t\t\tdel self.connection[obj_or_id.id]\n\t\tself.sync()", "def delete(self, sql):", "def deleteModelObject(param):\n if isinstance(param, db.Key) :\n return db.delete(param)\n if isinstance(param, db.Model) :\n return db.delete(param)\n if isinstance(param, str) :\n return db.delete(db.Key(param))\n if isinstance(param, dict) :\n return db.delete(db.Key(param['id']))", "def __del__(self):\n self.savedb()\n del self.db\n del self.task", "def __del__(self):\n self.clear()", "def __del__( self ):\n\t\tLiFlame.degrid()", "def delete(self):\n if self.data:\n self.data.delete()\n super(Resource, self).delete()", "def del_kb(self):\n self.kb = None", "def test_moments_query_delete(self):\n body = Query()\n response = self.client.open(\n '/moments/query',\n method='DELETE',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the .fgd files specified in the config file
def loadFGDFiles(self):\n self.fgd = Fgd()\n numVals = LEConfig.fgd_files.getNumUniqueValues()\n if numVals == 0:\n QtWidgets.QMessageBox.critical(None, LEGlobals.AppName, "No FGD files specified in local config!", QtWidgets.QMessageBox.Ok)\n sys.exit(1)\n for i in range(numVals):\n fgdFilename = LEConfig.fgd_files.getUniqueValue(i)\n fgd = FgdParse(fgdFilename)\n self.fgd.add_include(fgd)
[ "def read_dfg(file_path):\n from pm4py.objects.dfg.importer import importer as dfg_importer\n dfg, start_activities, end_activities = dfg_importer.apply(file_path)\n return dfg, start_activities, end_activities", "def get_cfg():\n cfg_path = os.path.join(os.path.expanduser('~'), '.cfgnfo')\n\n cfg_info = {}\n config = ConfigParser.ConfigParser()\n config.read(cfg_path)\n\n for sect in config.sections():\n cfg_info[sect] = {}\n for opt in config.options(sect):\n cfg_info[sect][opt] = config.get(sect, opt)\n\n return cfg_info", "def read_fgong(filename):\n # Standard definitions and units for FGONG\n glob_pars = [('mass', 'g'),\n ('Rphot', 'cm'),\n ('Lphot', 'erg/s'),\n ('Zini', None),\n ('Xini', None),\n ('alpha', None),\n ('phi', None),\n ('xi', None),\n ('beta', None),\n ('dp', 's'),\n ('ddP_drr_c', None),\n ('ddrho_drr_c', None),\n ('Age', 'yr'),\n ('teff', 'K'),\n ('Gconst', 'cm3/gs2')]\n loc_pars = [('radius', 'cm'),\n ('ln(m/M)', None),\n ('Temp', 'K'),\n ('P', 'kg/m/s2'),\n ('Rho', 'g/cm3'),\n ('X', None),\n ('Lumi', 'erg/s'),\n ('opacity', 'cm2/g'),\n ('eps_nuc', None),\n ('gamma1', None),\n ('grada', None),\n ('delta', None),\n ('cp', None),\n ('free_e', None),\n ('brunt_A', None),\n ('rx', None),\n ('Z', None),\n ('R-r', 'cm'),\n ('eps_logg', None),\n ('Lg', 'erg/s'),\n ('xhe3', None),\n ('xc12', None),\n ('xc13', None),\n ('xn14', None),\n ('xo16', None),\n ('dG1_drho', None),\n ('dG1_dp', None),\n ('dG1_dY', None),\n ('xh2', None),\n ('xhe4', None),\n ('xli7', None),\n ('xbe7', None),\n ('xn15', None),\n ('xo17', None),\n ('xo18', None),\n ('xne20', None),\n ('xh1', None),\n ('na38', None),\n ('na39', None),\n ('na40', None)]\n\n # Start reading the file\n ff = open(filename, 'r')\n lines = ff.readlines()\n\n # Read file definitions from the fifth line (first four is comments)\n NN, ICONST, IVAR, IVERS = [int(i) for i in lines[4].strip().split()]\n if not ICONST == 15:\n raise ValueError('cannot interpret FGONG file: wrong ICONST')\n\n # Data storage\n data = []\n starg = {}\n\n # Read the file from the fifth line onwards\n # Change in the format for storing the numbers (February 2017):\n # - If IVERS <= 1000, 1p5e16.9\n # - If IVERS > 1000, 1p,5(x,e26.18e3)\n if IVERS <= 1000:\n for line in lines[5:]:\n data.append([line[0 * 16:1 * 16], line[1 * 16:2 * 16],\n line[2 * 16:3 * 16], line[3 * 16:4 * 16],\n line[4 * 16:5 * 16]])\n else:\n for line in lines[5:]:\n data.append([line[0 * 27:1 * 27], line[1 * 27:2 * 27],\n line[2 * 27:3 * 27], line[3 * 27:4 * 27],\n line[4 * 27:5 * 27]])\n\n # Put the data into arrays\n data = np.ravel(np.array(data, float))\n for i in range(ICONST):\n starg[glob_pars[i][0]] = data[i]\n data = data[15:].reshape((NN, IVAR)).T\n\n # Reverse the profile to get center ---> surface\n data = data[:, ::-1]\n\n # Make it into a record array and return the data\n starl = np.rec.fromarrays(data, names=[lp[0] for lp in loc_pars])\n\n # Exclude the center r = 0. 
mesh (MESA includes it)\n if starl['radius'][0] < 1.e-14:\n starl = starl[1:]\n\n return starg, starl", "def _read_config_files(self, *, base: str = '') -> None:\n self.__used_config_files = frozenset(self.read(os.path.join(base, f) for f in reversed(self.CONFIG_FILES)))", "def read_fireball_records( local_dir):\n logger = logging.getLogger()\n local_dir = os.path.abspath( os.path.realpath( local_dir) )\n logger.info( 'read_fireball_records_path, ' + local_dir)\n records = []\n files = glob.glob( os.path.join( local_dir, '*fireball.cfg'))\n for fle in files:\n logger.debug( 'fireball_record_found, ' + str(fle) )\n records.append( read_fireball_record( os.path.join( local_dir, fle) ))\n logger.debug( 'fireball_records_loaded, ' + str(records) )\n return records", "def load_cfg(self):\n self.df_cfg = pd.read_csv(FILE_IDACTCFG)\n for idx, row in self.df_cfg.iterrows():\n #if row['enabled']==1:\n self.cfg[row['ds_name']] = row\n \n if not os.path.isfile(FILE_IDACTFUSION):\n if os.path.isfile(FILE_IDACTFUSION_SRC):\n self.df_fusion = pd.read_csv(FILE_IDACTFUSION_SRC)\n else: \n self.df_fusion = pd.read_csv(FILE_IDACTFUSION)\n self.load_fusion()", "def read(self):\n\n # Add options from config file.\n print self._config.get_all()\n for id, (val, type) in self._config.get_all().items():\n if type == 'src' and not self.check(id, val): # Don't use wrong paths\n log.warning(_('idg.options.not.valid.use.default') + id +\\\n \" \" + val)\n continue\n self._opts[id] = [val, type]\n\n dom = self._config.dom()\n if dom is None:\n log.error(_('idg.options.cant.parse.config.file') +\\\n self._config.path())\n return\n else:\n log.info(_('idg.options.using.config.file') + self._config.path())", "def read_config_file():\n \n MIN_RUN_TIME = 300 # min five minutes between runs\n \n config = configparser.ConfigParser(allow_no_value=True)\n configdata = {}\n \n config.read('backgrounder.ini')\n \n configdata['path'] = {}\n configdata['path']['image'] = config['path']['image']\n configdata['subreddits'] = config['subreddits']['subreddits']\n configdata['postsave'] = config['postsave']['method']\n configdata['timing'] = config['timing']['seconds']\n configdata['other'] = {}\n configdata['other']['ignore_duplicates'] = config['other']['ignore_duplicates']\n configdata['other']['download_gallery'] = config['other']['download_gallery']\n \n # validate user-entered config\n valid_dict = validate_config(configdata)\n for key, val in valid_dict.items():\n if val is False:\n messagebox.showinfo('Warning', 'There was an error reading backgrounder.ini.\\n\\nPlease delete your data.pkl file and rerun the program.'\n % (key))\n return None\n \n process_configdata(configdata)\n \n return configdata", "def read_fchk(in_name):\n with open(in_name) as data:\n lines = data.readlines()\n grad = []\n reading = False\n for line in lines:\n if line[0].isalpha():\n reading = False\n if reading == True:\n for num in map(float, line.split()):\n grad.append(num)\n if line.startswith(\"Cartesian Gradient\"):\n reading = True\n if line.startswith(\"Total Energy\"):\n energy = float(line.split()[3])\n if line.startswith(\"SCF Energy\"):\n scf_energy = float(line.split()[3])\n grad = np.array(grad)\n return energy, grad, scf_energy", "def read(self, path):\n RawConfigParser.read(self, path)\n\n path_d = path + \".d\"\n files = []\n\n if os.path.exists(path_d):\n files = [ os.path.join(path_d, f) for f in os.listdir(path_d) ]\n files.sort()\n\n for fname in files:\n p = RawConfigParser()\n p.read(fname)\n for section_name in p.sections():\n # 
New files override old, so remove first to avoid DuplicateSectionError.\n self.remove_section(section_name)\n self.add_section(section_name)\n for (name, value) in p.items(section_name):\n self.set(section_name, name, value)\n # Store the filename this section was read from.\n self.set(section_name, '_filename_', fname)", "def get_all_the_names_of_libgen_configuration_file():\n command='ls -l /etc/LibgenConfig*'\n out = connections.execute_mml_without_check(command)\n\n configuration_file_name_list = []\n line_list = out.split(\"\\r\\n\")\n for line in line_list:\n if line.count(\"etc\") == 1:\n match = re.search(r\"\\s*/etc/LibgenConfig_(.*).ini\", line, re.I) \n if match is None:\n exceptions.raise_ILError(\"ILKeywordSyntaxError\", \"there is no configuration file in the node\")\n else: \n configuration_file_name_list.append(match.group(1))\n return configuration_file_name_list", "def _readStdConfigFiles(cls):\n\n # Default one first\n cls.readConfigFile(DEFAULT_CONFIG)\n\n # Site specific one can override properties defined in default\n cls.readConfigFile(USER_CONFIG)", "def read_config_file():\n file_found = 0\n filename = URLNET_CFG\n search_path=os.environ['PATH']\n paths = ['.',]\n # allow for the possibility that there is no HOME env variable\n home = None\n try:\n home = os.environ['HOME']\n except Exception, e:\n pass\n # \n if home != None and len(home) > 0:\n paths.append(home)\n paths = paths + split(search_path, pathsep)\n \n for path in paths:\n if exists(join(path, filename)):\n file_found = 1\n break\n if file_found:\n path = abspath(join(path, filename))\n try:\n fd = open(path)\n lines = fd.readlines()\n fd.close()\n return lines\n except Exception, e:\n return None\n else:\n return None", "def host_fact_files(self):\n fact_files = []\n fact_dir = self.config.get('fact_dir')\n all_files = os.listdir(fact_dir)\n for f in all_files:\n fn = os.path.join(fact_dir, f)\n if os.path.isfile(fn):\n fact_files.append((f, fn))\n return fact_files", "def get_config_files(self):\n if package.backend.FORMAT == \"rpm\":\n return [\"sysconfig/clamd.amavisd\", \"tmpfiles.d/clamd.amavisd.conf\"]\n return []", "def get_configs_from_multiple_files():\n eval_config = eval_pb2.EvalConfig()\n with tf.gfile.GFile( FLAGS.eval_config_path, 'r' ) as f:\n text_format.Merge( f.read(), eval_config )\n\n model_config = model_pb2.DetectionModel()\n with tf.gfile.GFile( FLAGS.model_config_path, 'r' ) as f:\n text_format.Merge( f.read(), model_config )\n\n input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile( FLAGS.input_config_path, 'r' ) as f:\n text_format.Merge( f.read(), input_config )\n\n return model_config, eval_config, input_config", "def __readConfig(self):\r\n\r\n\t\tfr = open(self.__configFilePath, 'r')\r\n\t\t\r\n\r\n\t\tfor line in fr.readlines():\r\n\t\t\tline = line.strip()\r\n\t\t\tif line == \"\":\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif line[0] != '#': # ignore lines start by #\r\n\t\t\t\tsp = line.split('=')\r\n\t\t\t\tif len(sp) == 2:\r\n\t\t\t\t\tkey = sp[0].strip()\r\n\t\t\t\t\tval = sp[1].strip()\r\n\t\t\t\t\tself.__configDict[key] = val\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.__print(\"Ignore config line: \" + line)\r\n\r\n\t\tself.__print(\"Read configs from: %s\\n%d configs read!\" \\\r\n\t\t\t\t\t\t\t\t % (self.__configFilePath, len(self.__configDict)) \\\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\tfr.close()", "def readin(self):\n \n if self.filename.endswith('.fits'):\n # Assumes Science Verification data\n self.read_SV_fits()\n elif self.filename.endswith('.npz'): \n # 
Assumes DES Y3 Gold data\n self.read_Y3_2_2_npz()\n else: \n print('Unrecognized file type: ' + self.filename)", "def load_cfg_gpio():\t\n\tif 'directories' not in cfg_main or 'daemon-config' not in cfg_main['directories'] or 'config' not in cfg_daemon:\n\t\treturn\n\telse:\t\t\n\t\tconfig_dir = cfg_main['directories']['daemon-config']\n\t\t# TODO\n\t\tconfig_dir = \"/mnt/PIHU_CONFIG/\"\t# fix!\n\t\tconfig_file = cfg_daemon['config']\n\t\t\n\t\tgpio_config_file = os.path.join(config_dir,config_file)\n\t\n\t# load gpio configuration\n\tif os.path.exists(gpio_config_file):\n\t\tconfig = configuration_load(LOGGER_NAME,gpio_config_file)\n\t\treturn config\n\telse:\n\t\tprint \"ERROR: not found: {0}\".format(gpio_config_file)\n\t\treturn" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares a Chrome driver that puts searches into query-by-school mode with the department set to Computer Science.
def prep_query_by_school_driver():\n driver = webdriver.Chrome(os.path.join(os.getcwd(), 'chromedriver'))\n columbia_url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(COLUMBIA_ID)\n driver.get(columbia_url)\n driver.find_element_by_class_name('close-this').click()\n dept_input = driver.find_element_by_xpath("//input[@placeholder='Enter Your Department']")\n dept_input.send_keys('Computer Science')\n cs_option = driver.find_element_by_xpath("//li[@data-value='Computer Science']")\n cs_option.click()\n return driver
[ "def setup_driver():\n # Import webdriver for interactive webpages\n from selenium import webdriver\n\n # Initiate Selenium using the chrome browser, would be nice to have future editions include other browsers\n chromedriver = Chromedriver_path\n driver = webdriver.Chrome(executable_path=chromedriver)\n return driver", "def setup_chromedriver():\n global chrome_options\n global driver\n print(time() + \"[ INFO ] Starting Chromedriver...\")\n chrome_options = Options()\n if args.without_headless == False:\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--log-level=3')\n chrome_options.add_argument('--disable-gpu')\n if len(proxy) > 0:\n chrome_options.add_argument('--proxy-server=%s' % proxy)\n prefs={\"profile.managed_default_content_settings.images\": 2, 'disk-cache-size': 4096 }\n chrome_options.add_experimental_option('prefs', prefs)\n chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\n driver = webdriver.Chrome(CDP,\n options=chrome_options)\n\n driver.delete_all_cookies()", "def initialize():\r\n global driver\r\n logging.basicConfig(\r\n format='[%(asctime)s] [%(levelname)s] %(message)s', level=LOG_LEVEL\r\n )\r\n read_config()\r\n time.sleep(0.1)\r\n check_or_get_campus_il_auth()\r\n driver = Chrome()\r\n driver.maximize_window()", "def main():\r\n foundDriver = True\r\n #Initiate Driver & Navigate to get all cities and towns in the U.S.\r\n try:\r\n driver = webdriver.Chrome(chromedriver)\r\n except:\r\n print('#========================================================')\r\n input('# \"Chromedriver\" executable needs to be in PATH (User Desktop).\\n#Please see https://sites.google.com/a/chromium.org/chromedriver/home to download\\nEnter any key to quit\\n#========================================================')\r\n exit()\r\n foundDriver = False\r\n \r\n if foundDriver == True:\r\n print('#===================================')\r\n print('# Do not close the chrome window')\r\n print('# If you see the current website visually fully loaded you can click the X button to force stop loading\\n# Everything we need to scrape is already on the screen')\r\n print('#===================================')\r\n driver.get('https://www.britannica.com/topic/list-of-cities-and-towns-in-the-United-States-2023068')\r\n #State is the Key and Values are list of city/towns\r\n dictionary = {}\r\n length = len(dictionary)\r\n theRange = list(range(326620, 326670))\r\n #States \r\n sName = driver.find_elements_by_class_name('h1')\r\n for i in range(len(sName)):\r\n #Append state as Key and Cities & Towns for \r\n dictionary[sName[i].text] = [x.text for x in driver.find_elements_by_css_selector('#ref' + str(theRange[i]) + '> ul > li')]\r\n print('\\nNext step, Yelp.')\r\n #YELP\r\n url = 'https://www.yelp.com/search?find_desc=Massage%20Therapy&find_loc=' #Change Massage Therapist to what ever you're looking for\r\n\r\n #Lists holding companies data\r\n company = []\r\n phone = []\r\n state = []\r\n city = []\r\n print('\\n')\r\n print('This will take a very very long time. 
Once counter reaches ' + str(length) + ', program is done.\\n')\r\n counterReach = 1\r\n for x,y in dictionary.items():\r\n print(counterReach)\r\n for v in y:\r\n yelpURL = url + x.lower() + '%2C%20' + v.lower() + '&cflt=massage_therapy'# If you're not using this for massage_therapy remove the filter '&cflt=vet' or add your own filter\r\n #User output\r\n print('#========================')\r\n print('# STATE: ' + x)\r\n print('# CITY: ' + v)\r\n\r\n driver.get(yelpURL)\r\n dataOnPage = True\r\n try:\r\n pages = int(driver.find_element_by_xpath('//*[@id=\"wrap\"]/div[3]/div[2]/div[2]/div/div[1]/div[1]/div/div[1]/div/div[1]/span').text.split(' ')[-1])\r\n except:\r\n print('# No Data on Page') #If no data is on page(No Vet in this city) loop is done\r\n dataOnPage = False \r\n if dataOnPage == True:\r\n print('# PAGES: ' + str(pages))\r\n counter = 0\r\n print('#========================')\r\n for page in range(pages - 1):#Loop through each page within city and append\r\n try: \r\n c = driver.find_elements_by_xpath('//*[@id=\"wrap\"]/div[3]/div[2]/div[2]/div/div[1]/div[1]/div/ul/li/div/div/div[1]/div[2]/div/div[1]/div[1]/div[1]/h3/a')\r\n except:\r\n print('#######################################################################')\r\n print('No Company Names')\r\n print('#######################################################################')\r\n try:\r\n p = driver.find_elements_by_xpath('//*[@id=\"wrap\"]/div[3]/div[2]/div[2]/div/div[1]/div[1]/div/ul/li/div/div/div[1]/div[2]/div/div[2]/div/div[1]')\r\n except:\r\n print('#######################################################################')\r\n print('No Phone Numbers')\r\n print('#######################################################################') \r\n if len(c) == len(p):\r\n #Extract text from web elements\r\n [company.append(i.text) for i in c]\r\n [phone.append(i.text) for i in p]\r\n #Append the city and state\r\n for q in range(len(c)):\r\n state.append(x)\r\n for q in range(len(c)):\r\n city.append(v)\r\n else:#Skip page page array lengths dont match\r\n print('Skipping Page')\r\n #To get to the next page add &start= incremeting by 10\r\n counter += 10\r\n driver.get(yelpURL + '&start=' + str(counter))\r\n print('')\r\n counterReach += 1\r\n #Todays date to name CSV\r\n date = datetime.today().strftime('%Y-%m-%d')\r\n #Output data to CSV\r\n with open('C:/Users/Henry/Desktop/Yelp-Scrape/' + str(date) + '.csv', 'w', newline='') as f: #<------------- Change location you wish to create file\r\n fieldNames = ['Name', 'Phone', 'City', 'State', ]\r\n thewriter = csv.DictWriter(f, fieldnames = fieldNames)\r\n thewriter.writeheader()\r\n for i in range(len(company)):\r\n thewriter.writerow({'Name' : company[i], 'City' : city[i], 'State' : state[i], 'Phone' : phone[i]})\r\n\r\n input('All done your CSV can be found on your desktop folder Yelp-Scrape')", "def issue_driver_query(query_URL, query_params=None):\n\n if os.environ['USER'] == 'ubuntu': \n display = Display(visible=0, size=(800, 600))\n display.start()\n \n driver = webdriver.Firefox()\n driver.implicitly_wait(10)\n driver.get(query_URL)\n time.sleep(3)\n\n while 'beta' in driver.current_url: \n driver.close()\n driver = webdriver.Firefox()\n driver.implicitly_wait(10)\n driver.get(query_URL)\n\n # Wait for the page to render. \n time.sleep(random.randint(7, 15))\n \n if query_params: \n # Find search boxes. 
\n title_search = driver.find_element_by_id(query_params[0][0])\n location_search = driver.find_element_by_id(query_params[1][0])\n\n # Clear search boxes and enter text. \n title_search.clear()\n location_search.clear()\n title_search.send_keys(query_params[0][1])\n time.sleep(random.randint(3, 5))\n location_search.send_keys(query_params[1][1])\n\n # Execute that query! \n location_search.send_keys(Keys.ENTER)\n\n return driver", "def setUp(self):\n\t\tglobal driver\n\t\tdriver = webdriver.Firefox()\n\t\t# driver = Ie()\n\t\t# driver = webdriver.Chrome()\n\t\tdriver.get(\"http://www.amazon.com\")", "def launch_browser(self):\n self.driver = webdriver.Chrome()", "def chrome_driver(self):\n self.chrome_options = webdriver.ChromeOptions()\n prefs = {\"profile.managed_default_content_settings.images\": 2}\n self.chrome_options.add_experimental_option(\"prefs\", prefs)\n self.driver = webdriver.Chrome(self.path_to_chrome_driver,options=self.chrome_options)\n self.driver.implicitly_wait(5)", "def Get_SetupDriver(url, headless=True):\n try: \n options = Options()\n if headless:\n # WebDriver opens Chrome with browser\n options.add_argument(\"--headless\")\n driver = webdriver.Chrome(options=options)\n driver.get(url)\n return driver\n except Exception as e:\n print(e)\n print(\n f\"Failed to set up Chrome WebDriver\")\n return None", "def _mock_query_youtube(self, ys):\n ys.driver.get(self.SEARCH_PAGE_URL)\n self._ys_driver = ys.driver\n ys.query_youtube = self._mocked_query_youtube", "def tests_setup(request):\n logging.info(\"Initializing the Selenium Driver\")\n driver = webdriver.Firefox()\n driver.maximize_window()\n\n # use the same driver for all the test cases\n request.cls.driver = driver\n\n login_to_kubeflow(driver)\n\n # Run the test cases\n yield driver\n # After all the test cases where run\n\n logging.info(\"Closing the Selenium Driver\")\n driver.close()", "def __init__(self):\n options = Options()\n options.add_argument('-headless')\n self.path = \"C:\\\\Users\\\\weimaoquan\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Application\\\\chromedriver.exe\"\n self.browser = webdriver.Chrome(executable_path=self.path, options=options)\n self.browser.implicitly_wait(3)\n self.login()", "def set_chrome_options():\n chrome_options = webdriver.ChromeOptions()\n # chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_options.add_argument(\"--disable-blink-features=AutomationControlled\")\n chrome_options.add_argument(\"window-size=923,1011\")\n user_agent = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 \" \\\n \"Safari/537.36 \"\n chrome_options.add_argument(f'user-agent={user_agent}')\n\n chrome_prefs = dict()\n chrome_options.experimental_options[\"prefs\"] = chrome_prefs\n chrome_options.experimental_options[\"excludeSwitches\"] = [\"enable-automation\"]\n chrome_options.experimental_options[\"useAutomationExtension\"] = False\n chrome_prefs[\"profile.default_content_settings\"] = {\"images\": 2}\n return chrome_options", "def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(10)", "def setUp(self):\n logger.debug('Initializing web driver')\n # Initialize the web driver\n self.driver = webdriver.Chrome(os.path.join(\n os.getcwd(), settings.SELENIUM['CHROMEDRIVER_PATH']))\n\n # Initialize page controllers\n self.search_controller = SearchController(self.driver)\n self.checkout_controller = 
CheckoutController(self.driver)", "def automate_scrolling():\r\n options = webdriver.ChromeOptions()\r\n options.add_argument('--ignore-certificate-errors')\r\n options.add_argument(\"--incognito\")\r\n options.add_argument(\"--window-size=1920x1080\")\r\n options.add_argument(\"--headless\")\r\n driver = webdriver.Chrome(chrome_options=options,\r\n executable_path=r\"C:\\Users\\Carmen\\Downloads\\chromedriver_win32\\chromedriver.exe\")\r\n return driver", "def load_driver(gene_url):\n global driver\n if \"driver\" in dir(): \n driver.quit()\n driver = webdriver.Chrome()\n driver.get(gene_url);\n driver.find_element_by_partial_link_text(\"Display Options\").click()\n seq = Select(driver.find_element_by_id(\"seqsPerPage\"))\n seq.select_by_value(\"2000\")\n driver.find_element_by_id(\"displayCmd\").submit()", "def scraper(page):\n\n # Initialize empty lists\n titles = []\n urls = []\n techs = []\n instructors = []\n\n # Start scraper and get course blocks\n soup = BeautifulSoup(page, 'html')\n div = soup.findAll(\"div\", { \"class\": \"course-block\"})\n\n # Loop over all courses\n for element in div:\n a = element.find(\"a\", { \"class\": \"course-block__link\"})\n\n # Get url\n url = 'https://www.datacamp.com' + a.get('href')\n\n # Get tech\n if a.contents[1].get(\"class\")[1] == 'course-block__technology--r':\n tech = 'R'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--python':\n tech = 'Python'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--sql':\n tech = 'SQL'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--git':\n tech = 'Git'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--shell':\n tech = 'Shell'\n\n # Get title\n title = [element.get_text() for element in a.select(\"h4\")][0]\n\n # Get instructor\n instructor_div = element.find(\"div\", { \"class\": \"course-block__author-body\"})\n instructor = [element.get_text() for element in instructor_div.select(\"p\")][0]\n\n # Write information in lists\n titles.append(title)\n urls.append(url)\n techs.append(tech)\n instructors.append(instructor)\n\n # Write ordered dictionary and return it\n courses = OrderedDict({'Course': titles,\n 'URL': urls,\n 'Tech': techs,\n 'Instructor': instructors})\n\n return courses", "def fill_data_search_attribute_new_query(driver, search_attribute_select_layer_textbox_text, search_attribute_textbox_Field_text, search_attribute_textbox_Operaror_text, search_attribute_textbox_Value_text, query_name, search_attribute_tool_name, search_attribute_sub_layer_name, tools_advanced_search_search_attribute_item, index, ws_index):\r\n\r\n fill_data_advanced_search_attribure_query(driver, \"Select Layer\", search_attribute_select_layer_textbox_text)\r\n\r\n fill_data_advanced_serach_attribute_query_ddl(driver, \"Select Field\", tools_advanced_search_search_attribute_item, index, ws_index)\r\n AppCommanUtility.select_ddl_item(driver, search_attribute_textbox_Field_text, tools_advanced_search_search_attribute_item, index, ws_index)\r\n\r\n fill_data_advanced_serach_attribute_query_ddl(driver, \"Select Operator\", tools_advanced_search_search_attribute_item, index, ws_index)\r\n AppCommanUtility.select_ddl_item(driver, search_attribute_textbox_Operaror_text, tools_advanced_search_search_attribute_item, index, ws_index)\r\n\r\n AppCommanUtility.fill_data_tools_fields(driver, \"Eg: 10\", search_attribute_textbox_Value_text)\r\n select_query_title(driver, query_name, tools_advanced_search_search_attribute_item, index, ws_index)\r\n\r\n 
attribute_query_savequery_button = driver.find_element_by_class_name(\"attributeQuerySaveQueryButton\")\r\n attribute_query_savequery_button.click()\r\n time.sleep(1)\r\n\r\n AppCommanUtility.click_popup_close_icon(driver, search_attribute_tool_name)\r\n\r\n attribute_query_search_button = driver.find_element_by_class_name(\"attributeQuerySearchButton\")\r\n attribute_query_search_button.click()\r\n time.sleep(1)\r\n\r\n AppCommanUtility.click_popup_close_icon(driver, search_attribute_tool_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the names and URLs of professors for this school. If only_take_top_20, only the top (most reviewed) professors are included; this is easier because the top 20 are shown when the page loads. If all professors are desired, then the driver iterates through the alphabet and takes the top 20 for each filtered result (e.g. professor names starting with 'A'). This process usually gets all of the possible professors for the school, unless one school has more than 20 professors starting with one letter.
def get_professors_from_school(driver, school_id, only_take_top_20 = False):\n url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(school_id)\n driver.get(url)\n num_professors = int(driver.find_element_by_xpath("//span[@class='professor-count']").text)\n if num_professors == 0:\n return num_professors, []\n if only_take_top_20 or num_professors < 20:\n return num_professors, get_current_list_of_professors(driver)\n results = []\n letter_filters = driver.find_elements_by_xpath("//a[@class='result']")\n for filter in letter_filters:\n filter_text = filter.text.strip()\n if filter_text != 'ALL':\n filter.click()\n time.sleep(.05)\n results += get_current_list_of_professors(driver)\n results = set(results)\n return num_professors, results
[ "def collect_professors_per_school(only_take_top_20):\n school2id = pickle.load(open('../rate_my_prof/school2id.pkl', 'rb'))\n sorted_schools = sorted(list(school2id.keys()))\n print(len(sorted_schools))\n school2info = {}\n driver = prep_query_by_school_driver()\n total_num_profs = 0\n total_num_prof_pages = 0\n for i, school in enumerate(sorted_schools):\n try:\n sid = school2id[school]\n num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=only_take_top_20)\n total_num_profs += num_profs\n total_num_prof_pages += len(prof_pages)\n school = school.strip()\n school2info[school] = (sid, num_profs, prof_pages)\n pickle.dump(school2info, open('../rate_my_prof/school2info.pkl', 'wb'))\n print('{}. School: {}. Num CS profs: {} -> SUCCESS'.format(i, school, num_profs, len(prof_pages)))\n except Exception as e:\n print('{}. School: {} -> FAILED'.format(i, school), e)\n driver.quit()\n print('Processed {} schools'.format(len(school2info)))\n print('{} CS profs in total'.format(total_num_profs))\n print('{} prof pages collected'.format(total_num_prof_pages))", "def test_get_professor_list(self):\n url = reverse('institute-professors-list', args=['IC'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def get_top_reviewers(self, top_stats_count=1):\n return sorted(self.reviewers.iteritems(), lambda x, y: cmp(x[1][0], y[1][0]), reverse=True)[:top_stats_count]", "def edit_professors_per_school():\n driver = prep_query_by_school_driver()\n fn = '../1.rate_my_prof/school2info.pkl'\n school2info = pickle.load(open(fn, 'rb'))\n missing_before = 0\n missing_now = 0\n for school, (sid, num_profs, prof_pages) in school2info.items():\n if len(prof_pages) < num_profs:\n missing_before += num_profs - len(prof_pages)\n try:\n num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=False)\n print('{} -> got {} out of {}'.format(school, len(prof_pages), num_profs))\n missing_now += num_profs - len(prof_pages)\n school2info[school] = (sid, num_profs, prof_pages)\n except:\n print('Failed parsing {} -> no change'.format(school))\n missing_now += num_profs - len(prof_pages) # still missing same amount\n print('Missing {} profs before, missing {} profs now'.format(missing_before, missing_now))\n pickle.dump(school2info, open(fn, 'wb'))", "def get_current_list_of_professors(driver):\n results = []\n list_elems = driver.find_elements_by_xpath(\"//li[contains(@id, 'my-professor')]\")\n for li in list_elems:\n link = li.find_element_by_tag_name('a')\n url = link.get_attribute('href')\n name = link.find_element_by_class_name('name').text.split('\\n')[0]\n last, first = name.split(', ', 1)\n results.append((first + ' ' + last, url))\n return results", "def top_reputation_directors():\n reader = initialize_reader()\n director_list = [{\n \"director\": row[1],\n \"scored\": (float(row[4]) + float(row[25])) / 2\n } for row in reader if row[4] and row[25]]\n directors = []\n for director in director_list:\n iterable = (list(x.get('director') for x in directors))\n if director.get('director') not in iterable:\n directors.append({\n \"director\": director.get('director'),\n \"scored\": director.get('scored')\n })\n else:\n director_list.remove(director)\n new_list = sorted(\n directors,\n key=lambda i: i['scored'],\n reverse=True\n )\n top_five = new_list[:5]\n if directors:\n print(\" \\n Top 5 the best directors \\n\")\n top = 0\n for director in top_five:\n top = top + 1\n top_director = director.get(\"director\")\n top_scored = 
director.get(\"scored\")\n print(f\"Top {top} is {top_director} with {top_scored} scored\")", "def get_available_topics_professors(self):\n professors = []\n for topic in self.available_topics:\n try:\n for prof in topic.professors.all():\n professors.append(prof)\n except:\n # Topic or Professor doesn't exist\n pass\n return list(set(professors))", "def get_top_task_submitters(self, top_stats_count=1):\n return sorted(self.reviewers.iteritems(), lambda x, y: cmp(x[1][1], y[1][1]), reverse=True)[:top_stats_count]", "def test_top_students(self):\n self.assertEqual(hw4.top_students(self.cs122, 2), ['Anna', 'Alex'])\n self.assertEqual(hw4.top_students(self.cs122, 10),\n ['Anna', 'Alex', 'Zoe', 'Dan'])\n self.assertEqual(hw4.top_students(self.empty_class, 6), [])", "def get_top_performer_by_most_completed_courses():\n try:\n top_performering_user_to_complete_most_course = list()\n\n completion_count = Counter(cd['user'] for cd in certifications_data)\n for index, users in enumerate(completion_count.most_common(5)):\n user_details = {}\n user_details[\"id\"] = index\n user_details[\"user_id\"] = user_dict_obj[users[0]][\"id\"]\n user_details[\"user_email\"] = user_dict_obj[users[0]][\"email\"]\n user_details[\"user_firstname\"] = user_dict_obj[users[0]][\"firstName\"]\n user_details[\"user_lastname\"] = user_dict_obj[users[0]][\"lastName\"]\n user_details[\"total_courses_completed\"] = users[1]\n\n top_performering_user_to_complete_most_course.append(user_details)\n return TopPerformerByMostCompletedCoursesListResponse().dump({\"top_performering_user_to_complete_most_course\": top_performering_user_to_complete_most_course})\n except Exception as e:\n print('Error at /api/v1/get-top-performer-by-most-completed-courses: ', e)", "def less_criticized():\n reader = initialize_reader()\n movies_less = []\n for row in reader:\n if(row[2]):\n movies_less.append({\"name\": row[11], \"num_critic_for_users\": int(row[2])}) \n new_list = sorted(movies_less, key=lambda i: i['num_critic_for_users'])\n topTenList = new_list[:10]\n top = 0\n print(\"Top 10 Movies less criticized \\n\")\n for movie in topTenList:\n top = top + 1\n print(f\"Top {top} is {movie.get('name')} with {movie.get('num_critic_for_users')}\")", "def all_cohorts_top_struggle_standards(teacher_id):\n\n scores_list = []\n\n cohorts = model.Cohort.query.filter_by(teacher_id=teacher_id).all()\n\n most_recent_tests = get_most_recent_tests(cohorts)\n\n student_ids = []\n standards_list = []\n\n for cohort in cohorts:\n students = cohort.studentcohorts\n for student in students:\n student_ids.append(student)\n\n for test_id in most_recent_tests:\n scores = model.Score.query.filter_by(test_id=test_id, student_id=student_ids[0].student.id).all()\n for score in scores:\n standards_list.append(score.standard)\n\n for standard in standards_list:\n scores_by_standard = {}\n scores_by_standard[\"name\"] = standard.code\n scores_by_standard[\"description\"] = standard.description\n scores_by_standard[\"id\"] = standard.id\n scores_by_standard[\"students\"] = []\n total_scores = len(student_ids)\n\n for test_id in most_recent_tests:\n m_count = 0\n a_count = 0\n fb_count = 0\n\n scores = model.Score.query.filter_by(test_id=test_id, standard_id=standard.id).all()\n\n for score in scores:\n if score.score == \"M\":\n m_count += 1\n elif score.score == \"A\":\n a_count += 1\n elif score.score == \"FB\":\n fb_count += 1\n scores_by_standard[\"students\"].append(score.student.first_name + \" \" + score.student.last_name)\n\n m_percent = (float(m_count) / 
float(total_scores)) * 100\n scores_by_standard[\"percent\"] = m_percent\n scores_list.append(scores_by_standard)\n scores_by_standard[\"students\"].sort()\n\n scores_list.sort(key=itemgetter(\"percent\"))\n\n return scores_list", "def get_presenters_print(self):\r\n return self.presenters.order_by('last_name')", "def get_top_comment_submitters(self, top_stats_count=1):\n return sorted(self.reviewers.iteritems(), lambda x, y: cmp(x[1][2], y[1][2]), reverse=True)[:top_stats_count]", "def get_presenters(self):\r\n return self.presenters.order_by('-leader', 'last_name')", "def test_get_professor_detail(self):\n url = reverse('institute-professors-detail',\n args=['IC', 'Pedro Rezende'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def topMatches(prefs,person,n=10,similarity=sim_distance):\n scores=[(similarity(prefs,person,other),other) for other in prefs if other!=person]\n # Sort the list so the highest scores appear at the top\n scores.sort()\n scores.reverse()\n return scores[0:n]\n #return scores", "def top_actors():\n reader = initialize_reader()\n actor_list = [{\"actor\": row[10], \"scored\": (float(row[4]) + float(row[25])) / 2 } for row in reader if row[4] and row[25]]\n actors = []\n for actor in actor_list:\n if actor.get('actor') not in list(x.get('actor') for x in actors):\n actors.append({\"actor\": actor.get('actor'), \"scored\": actor.get('scored')})\n else:\n actor_list.remove(actor) \n new_list = sorted(actors, key=lambda i: i['scored'], reverse=True)\n top_five = new_list[:5]\n\n if actors:\n print(\" \\n Top 5 the best actors \\n\")\n top = 0\n for actor in top_five:\n top = top + 1\n print(f\"Top {top} is {actor.get('actor')} with {actor.get('scored')} scored\")", "def prep_query_by_school_driver():\n driver = webdriver.Chrome(os.path.join(os.getcwd(), 'chromedriver'))\n columbia_url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(COLUMBIA_ID)\n driver.get(columbia_url)\n driver.find_element_by_class_name('close-this').click()\n dept_input = driver.find_element_by_xpath(\"//input[@placeholder='Enter Your Department']\")\n dept_input.send_keys('Computer Science')\n cs_option = driver.find_element_by_xpath(\"//li[@data-value='Computer Science']\")\n cs_option.click()\n return driver" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current professors listed on a school's page, given its filter settings.
def get_current_list_of_professors(driver):\n results = []\n list_elems = driver.find_elements_by_xpath("//li[contains(@id, 'my-professor')]")\n for li in list_elems:\n link = li.find_element_by_tag_name('a')\n url = link.get_attribute('href')\n name = link.find_element_by_class_name('name').text.split('\n')[0]\n last, first = name.split(', ', 1)\n results.append((first + ' ' + last, url))\n return results
[ "def get_professors_from_school(driver, school_id, only_take_top_20 = False):\n url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(school_id)\n driver.get(url)\n num_professors = int(driver.find_element_by_xpath(\"//span[@class='professor-count']\").text)\n if num_professors == 0:\n return num_professors, []\n if only_take_top_20 or num_professors < 20:\n return num_professors, get_current_list_of_professors(driver)\n results = []\n letter_filters = driver.find_elements_by_xpath(\"//a[@class='result']\")\n for filter in letter_filters:\n filter_text = filter.text.strip()\n if filter_text != 'ALL':\n filter.click()\n time.sleep(.05)\n results += get_current_list_of_professors(driver)\n results = set(results)\n return num_professors, results", "def collect_professors_per_school(only_take_top_20):\n school2id = pickle.load(open('../rate_my_prof/school2id.pkl', 'rb'))\n sorted_schools = sorted(list(school2id.keys()))\n print(len(sorted_schools))\n school2info = {}\n driver = prep_query_by_school_driver()\n total_num_profs = 0\n total_num_prof_pages = 0\n for i, school in enumerate(sorted_schools):\n try:\n sid = school2id[school]\n num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=only_take_top_20)\n total_num_profs += num_profs\n total_num_prof_pages += len(prof_pages)\n school = school.strip()\n school2info[school] = (sid, num_profs, prof_pages)\n pickle.dump(school2info, open('../rate_my_prof/school2info.pkl', 'wb'))\n print('{}. School: {}. Num CS profs: {} -> SUCCESS'.format(i, school, num_profs, len(prof_pages)))\n except Exception as e:\n print('{}. School: {} -> FAILED'.format(i, school), e)\n driver.quit()\n print('Processed {} schools'.format(len(school2info)))\n print('{} CS profs in total'.format(total_num_profs))\n print('{} prof pages collected'.format(total_num_prof_pages))", "def get_available_topics_professors(self):\n professors = []\n for topic in self.available_topics:\n try:\n for prof in topic.professors.all():\n professors.append(prof)\n except:\n # Topic or Professor doesn't exist\n pass\n return list(set(professors))", "def test_get_professor_list(self):\n url = reverse('institute-professors-list', args=['IC'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def find_professors():\n daemo.publish(\n project_key=PROJECT_KEY,\n tasks=[\n {\n \"stream\": \"Computer Science\",\n \"institute\": \"Stanford University\"\n },\n {\n \"stream\": \"Bioengineering\",\n \"institute\": \"Stanford University\"\n },\n ],\n approve=approve_correct_response,\n completed=rate_workers\n )", "def edit_professors_per_school():\n driver = prep_query_by_school_driver()\n fn = '../1.rate_my_prof/school2info.pkl'\n school2info = pickle.load(open(fn, 'rb'))\n missing_before = 0\n missing_now = 0\n for school, (sid, num_profs, prof_pages) in school2info.items():\n if len(prof_pages) < num_profs:\n missing_before += num_profs - len(prof_pages)\n try:\n num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=False)\n print('{} -> got {} out of {}'.format(school, len(prof_pages), num_profs))\n missing_now += num_profs - len(prof_pages)\n school2info[school] = (sid, num_profs, prof_pages)\n except:\n print('Failed parsing {} -> no change'.format(school))\n missing_now += num_profs - len(prof_pages) # still missing same amount\n print('Missing {} profs before, missing {} profs now'.format(missing_before, missing_now))\n pickle.dump(school2info, 
open(fn, 'wb'))", "def list(self, request) -> QuerySet:\n if request.user.has_perm(\"user.can_retrieve_all_users\"):\n return self.get_queryset().all()\n elif request.user.has_perm(\"user.can_retrieve_users_in_school\"):\n #TODO: implment this\n pass \n else:\n raise PermissionError(\"You cannot retrieve users that way.\")", "def prof_obj(professors):\r\n plist = [(prof(row.profID, row.Name, row.years_of_exp)) for index, row in professors.iterrows()]\r\n return plist", "def getPeople(self):\n\n secman = getSecurityManager()\n \n #There *has* to be a better way to do this...\n localPeople = self.getReferences(relationship='classifications_people')\n\n #Get the intersection of people referenced to this classification and people within/referenced to the parent\n classificationPeople = list(set(localPeople) & set(self.aq_parent.getPeople()))\n \n #Determine the valid people to show\n visiblePeople = []\n currentDateTime = DateTime()\n for person in classificationPeople:\n if currentDateTime >= person.getEffectiveDate() and (currentDateTime < person.getExpirationDate() or person.getExpirationDate() is None):\n if secman.checkPermission(View, person):\n visiblePeople.append(person)\n \n #Return only the visible people\n return visiblePeople", "def test_get_professor_detail(self):\n url = reverse('institute-professors-detail',\n args=['IC', 'Pedro Rezende'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def get_pricings_for_list(event, users):\n pricings = RegConfPricing.objects.none()\n \n for user in users:\n pricings = pricings | get_available_pricings(event, user)\n pricings = pricings | get_available_pricings(event, AnonymousUser())\n \n # return the QUERYSET\n return pricings", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = vpnalwaysonprofile()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def profiles():\n profs = UserProfile.query.order_by(UserProfile.lastname).all()\n return render_template('profiles.html', users=profs)", "def filter_list_queryset(self, request, queryset, view):\n\n # Staff can always see everything\n if request.user.is_staff:\n return queryset\n\n # Publication.objects.filter(membership__user=request.user)\n return queryset.filter(members__user=request.user)", "def get_proficiencies(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.ProficiencyList(self._results, runtime=self._runtime)", "def get_friends_profiles(self):\n friends = Friend.objects.friends(self)\n profiles = [get_object_or_404(UserProfile, username=friend.username) for friend in\n friends]\n return profiles", "def professor_dashboard(request):\n assignments = list(Assignment.objects.filter(owner = request.user.professor).all())\n\n return render_to_response(\"professor/index.html\", \\\n {'user': request.user, 'assignments': assignments})", "def get_prousers(self, count: int = 5):\n resp = sess.get(api.pro_users % (self.symbol, count))\n dt = resp.ok and resp.json()\n self.prousers = [User(i) for i in dt]", "def get_users_by_workers(request):\n is_active_worker = True\n w = None\n if request.method == \"GET\" and 'workers' in request.GET:\n try:\n w = int(request.GET.get(\"workers\"))\n except Exception, e:\n w = 1\n is_active_worker = bool(w)\n if is_active_worker:\n users = 
User.objects.exclude(userprofile__user_type__pk=1).filter(userprofile__is_active=True).order_by(\"-userprofile__is_active_worker\")\n else:\n users = User.objects.exclude(userprofile__user_type__pk=1).filter(userprofile__is_active_worker=is_active_worker, userprofile__is_active=True)\n return users, is_active_worker" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the professor page and their reviews.
def parse_professor_page(url):\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n reviews_heading = soup.find('div', attrs={'data-table':'rating-filter'})\n if reviews_heading is None:\n return 0, []\n num_reviews = int(reviews_heading.text.split()[0])\n reviews_table = soup.find('table', attrs={'class':'tftable'})\n reviews = []\n for row in reviews_table.find_all('tr')[1:]:\n if row.get('id'):\n reviews.append(_parse_reviews_row(row))\n return num_reviews, reviews
[ "def parse_individual_review(self, html_webpage, url_webpage):\n \n #Name of the location\n re_location_name = re.compile(r\"ui_pill inverted.*\\\"(.*)\\\"<\", re.S)\n \n #Name of the entity\n re_entity_name = re.compile(r\"HEADING.+>(.*)<\", re.S)\n \n re_user_name = re.compile(r\"scrname.+>(.+)<\", re.S)\n re_review_rating = re.compile(r\"reviewItemInline.+ui_bubble_rating bubble_([0-5][0-5])\", re.S)\n re_review_date = re.compile(r\"raitingDate relativeDate.+title=\\\"(.+)\\\"\", re.S)\n re_review_title = re.compile(r\"quote.+noQuotes\\\">(.+)<\")\n re_review_body = re.compile(r\"p.+partial_entry\\\">.*\\\"(.+)\\\"\")\n \n location_name = re_location_name.match(html_webpage)\n \n entity_name = re_entity_name.match(html_webpage)\n \n user_name = re_user_name.match(html_webpage)\n user_id = self.__parse_user_id(url_webpage)\n review_raiting = re_review_rating.match(html_webpage)\n review_date = re_review_date.match(html_webpage)\n review_title = re_review_title.match(html_webpage)\n review_body = re_review_body.match(html_webpage)\n \n self.__review = {\n ReviewFields.REVIEW_TYPE_ENTITY.value: self.__entity_type,\n ReviewFields.REVIEW_LOCATION_ID.value: self.__entity_location,\n ReviewFields.REVIEW_LOCATION_NAME.value: location_name,\n ReviewFields.REVIEW_ENTITY_ID: self.__entity_id,\n ReviewFields.REVIEW_ENTITY_NAME: entity_name,\n ReviewFields.REVIEW_USER_NAME.value: user_name,\n ReviewFields.REVIEW_USER_ID.value: user_id,\n ReviewFields.REVIEW_RAITING.value: review_raiting,\n ReviewFields.REVIEW_DATE.value: review_date,\n ReviewFields.REVIEW_TITLE.value: review_title,\n ReviewFields.REVIEW_BODY.value: review_body\n }", "def extract_page_reviews(\n page_source_soup : BeautifulSoup,\n verbose : int = 0):\n reviews = page_source_soup.find_all(name=\"div\", attrs={\"data-hook\":\"review\"})\n contents = []\n for i, review in enumerate(reviews):\n try:\n content = {}\n profile = review.find(name=\"a\", attrs={\"class\":\"a-profile\"})[\"href\"]\n name = review.find(name=\"span\", attrs={\"class\":\"a-profile-name\"}).text\n rating = review.find(name=\"a\", attrs={\"class\":\"a-link-normal\"})[\"title\"]\n title = review.find(name=\"a\", attrs={\"data-hook\":\"review-title\"}).text\n date = review.find(name=\"span\", attrs={\"data-hook\":\"review-date\"}).text\n body = review.find(name=\"span\", attrs={\"data-hook\":\"review-body\"})\n helpful_count = review.find(name=\"span\", attrs={\"data-hook\":\"helpful-vote-statement\"})\n images = review.find(name=\"div\", attrs={\"class\":\"review-image-tile-section\"})\n content[\"reviewer_id\"] = extract_profile_id(profile) \n content[\"name\"] = name\n content[\"rating\"] = rating\n content[\"title\"] = title\n content[\"date\"] = date\n content[\"helpful_count\"] = helpful_count\n content[\"body\"] = body\n content[\"images\"] = images\n contents.append(content)\n except Exception as e:\n print(f\"Failed review extraction from page source, exception : {e}\")\n return contents", "def extracts_reviews(self) -> None:\n review_parts = self.data.count(review_part_start) # count review tokens\n if review_parts > 0:\n start_idx = self.data.find(review_part_start) # starting point\n end_idx = self.data.find(review_part_end, start_idx) # starting end point\n while start_idx != -1: # As long as there are still reviews\n # extract the header an find the body\n header = (\n remove_html_code(\n self.data[start_idx + len(review_part_start) : end_idx]\n )\n + \". 
\"\n )\n start_idx = self.data.find(review_part_start, end_idx)\n end_idx = self.data.find(review_part_end, start_idx)\n # extract the body\n content = remove_html_code(\n self.data[start_idx + len(review_part_start) : end_idx]\n )\n start_idx = self.data.find(review_part_start, end_idx)\n end_idx = self.data.find(review_part_end, start_idx)\n # concat the header and the body, store into the review array\n self.reviews.append(header + content)", "def parse_review(review):\n review_info = {}\n id_ = review.find(class_='row')['id'].split('-')[1]\n review_info['Review Id'] = id_\n review_info['Rating'] = review.find(itemprop='ratingValue').text\n try:\n review_info['Played On'] = review.find(class_='review-play-date').text\n except AttributeError:\n pass\n try:\n review_info['Title'] = review.find(itemprop='name').text\n except AttributeError:\n pass\n for label in review.find_all(class_='label'):\n review_info[label.text] = '1'\n try:\n ratings = review.find(class_='review-secondary-ratings')\\\n .find_all('span')\n ratings = [rating.text.strip(':\\n\\t\\xa0') for rating in ratings]\n review_info.update(dict(zip(ratings[::2], ratings[1::2])))\n except AttributeError:\n pass\n paragraphs = review.find(class_='review-body').find_all('p')\n text = ' '.join([paragraph.text for paragraph in paragraphs])\n review_info['Review'] = text\n return review_info", "def retrieve_reviews_ratings(soup, idx):\n # Set container holding review details\n container = soup.findAll('div', class_=\"_2wrUUKlw _3hFEdNs8\")\n\n page_reviews = []\n page_ratings = []\n page_titles = []\n\n # Find all levels of rating\n rating_re = compile(\"ui_bubble_rating (.*)\")\n\n for item in container:\n \n rating_raw = item.find('span', class_=rating_re)\n rating_int = int(rating_raw.attrs['class'][1].split(\"_\")[1][-2])\n page_ratings.append(rating_int)\n\n review = item.find('q', class_=\"IRsGHoPm\").text\n \n # Check for more text after \"Read More\" activated, complete review text\n expanded = item.find('span', class_=\"_1M-1YYJt\")\n if expanded:\n review += expanded.text\n page_reviews.append(review)\n\n # Save review title\n title = item.find('a', class_='ocfR3SKN').text\n page_titles.append(title)\n\n # For monitoring during runtime\n print('page', idx + 1)\n \n return page_reviews, page_ratings, page_titles", "def professor_card(professor, user):\n return {\"professor\": professor, \"user\":user}", "def test_get_professor_list(self):\n url = reverse('institute-professors-list', args=['IC'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def parse_app_rev_page(self, response):\n\t\t\t\t# Start parsing this page\n\t\t\t\tself.logger.info('Parsing app reviews: %s p%d' % (response.meta['id'], response.meta['page']))\n\t\t\t\tsettings = self.crawler.settings \n\t\t\t\tfeed = feedparser.parse(response.url) \n\n\t\t\t\tif not feed.entries:\n\t\t\t\t\t\tself.logger.info('Get nothing from %s'% response.url)\n\t\t\t\t\t\treturn \n\t\t\t\t\n\t\t\t\tfor entry in feed.entries[1:]:\n\t\t\t\t\t\treview = SingleValItemLoader(item=Review(), response=response)\n\t\t\t\t\t\treview.add_value('id', app_rvw_id_re.findall(entry['id'])[0])\n\t\t\t\t\t\treview.add_value('timestamp', datetime.datetime.now().strftime(settings['TS_FMT']))\n\t\t\t\t\t\treview.add_value('title', entry['title'])\t\t\t\t\t\t\n\t\t\t\t\t\treview.add_value('app_id', response.meta['id'])\n\t\t\t\t\t\treview.add_value('comment', entry['content'][0]['value'])\n\t\t\t\t\t\treview.add_value('author_name', 
entry['author'])\t\t\t\t\t\t\n\t\t\t\t\t\treview.add_value('author_id', app_rvwer_id_re.findall(entry['authors'][0]['href'])[0])\n\t\t\t\t\t\treview.add_value('starRating', entry['im_rating'])\n\t\t\t\t\t\treview.add_value('version', entry['im_version'])\n\t\t\t\t\t\treview.add_value('vote', entry['im_votecount'])\n\t\t\t\t\t\treview.add_value('country', 'us')\n\t\t\t\t\t\treview.add_value('updated', datetime.datetime.fromtimestamp\n\t\t\t\t\t\t\t(int(calendar.timegm(entry['updated_parsed']))).strftime('%Y-%m-%d %H:%M:%S'))\n\t\t\t\t\t\t# print(\">>>>>>>test here 210 <<<<<<<<<<<\")\t\t\t\t\t\t\n\t\t\t\t\t\t# print(review._values['updated'])\t\t\t\t\t\t\n\n\t\t\t\t\t\tyield review.load_item()\n\t\t\t\t\t\t\n\t\t\t\t# request subsequent pages to be downloaded\n\t\t\t\t# Find out the number of review pages\n\t\t\t\tnoPages = int(app_page_re.findall(feed.feed['links'][3]['href'])[0])\n\t\t\t\t# print(\">>>>>>>test here 210 <<<<<<<<<<<\")\t\t\t\t\t\t\n\t\t\t\t# print(noPages)\n\t\t\t\tif response.meta['page'] < noPages:\n\t\t\t\t yield self._successor_page_request(response)", "def printreviews(reviews):\n p = []\n for n in reviews['reviews']:\n p.append(n['reviewText'])\n return p", "def parse_page(soup, movie_id):\n title = soup.find(attrs={'itemprop': 'name'}).string\n alt_title = soup.find(attrs={'itemprop': 'alternateName'}).string\n year = soup.find(name='small').a.string\n genres = list(genre.string for genre in soup.find_all(attrs={'itemprop': 'genre'}))\n countries = list(a.string for a in soup.find(attrs={'class': 'main'}).find_all('a') if not a.get('itemprop'))\n description = soup.find(attrs={'itemprop': 'description'}).contents[0].strip()\n director = soup.find(id='directors').find(attrs={'class': 'person'}).string\n actors = list(actor.string for actor in soup.find(id='actors').find_all(attrs={'class': 'person'}))\n imdb = soup.find(attrs={'class': 'rating'}).string\n tags = 'No tags'\n if soup.find(id='tags'):\n tags = list(tag.string for tag in soup.find(id='tags').find_all('a'))\n poster_link = soup.find(attrs={'class': 'posterbig'}).find(name='img').get('src')\n\n movie_info = {\n 'movie_id': movie_id,\n 'title': title,\n 'alt_title': alt_title,\n 'year': year,\n 'genres': genres,\n 'countries': countries,\n 'description': description,\n 'director': director,\n 'actors': actors,\n 'imdb': imdb,\n 'poster_link': poster_link\n }\n\n if tags is not 'No tags':\n movie_info['tags'] = tags\n\n return movie_info", "def scorepersentence(reviews):\n vs = []\n for sentence in reviews:\n vs.append(analyzer.polarity_scores(sentence))\n return vs", "def parse_hotellist_page(html):\n soup = BeautifulSoup(html)\n # Extract hotel name, star rating and number of reviews\n hotel_boxes = soup.findAll('div', {'class' :'listing wrap reasoning_v5_wrap jfy_listing p13n_imperfect'})\n if not hotel_boxes:\n print(\"#################################### Option 2 ######################################\")\n hotel_boxes = soup.findAll('div', {'class' :'listing_info jfy'})\n if not hotel_boxes:\n print(\"#################################### Option 3 ######################################\")\n hotel_boxes = soup.findAll('div', {'class' :'listing easyClear p13n_imperfect'})\n\n for hotel_box in hotel_boxes:\n hotel_name = hotel_box.find(\"a\", {\"target\" : \"_blank\"}).find(text=True)\n print(\"Hotel name: %s\" % hotel_name.strip())\n\n stars = hotel_box.find(\"img\", {\"class\" : \"sprite-ratings\"})\n if stars:\n print(\"Stars: %s\" % stars['alt'].split()[0])\n\n num_reviews = hotel_box.find(\"span\", 
{'class': \"more\"}).findAll(text=True)\n if num_reviews:\n print(\"Number of reviews: %s \" % [x for x in num_reviews if \"review\" in x][0].strip())\n\n # Get next URL page if exists, otherwise exit\n #div = soup.find(\"div\", {\"class\" : \"unified pagination \"})\n div = soup.find(\"div\", {\"class\" : \"pagination paginationfillbtm\"})\n\n # check if this is the last page\n if div.find('span', {'class' : 'guiArw pageEndNext'}):\n print(\"We reached last page\")\n return None\n # If not, return the url to the next page\n hrefs = div.findAll('a', href= True)\n for href in hrefs:\n if href.find(text = True) == '&raquo;':\n print(\"Next url is %s\" % href['href'])\n return href['href']", "def __iterateReviews(self):\n try:\n while self.fetch_next_link:\n log.debug(self.log_msg('trying to get review urls, with current uri: %s' % self.currenturi))\n search_page_soup = self.soup\n for each in self.soup.find('div', section='summaryList').ul.findAll('li', recursive=False): \n permalink_url = unicode(each.find('div',\n attrs={'class':'postTools'}).find('a',attrs={'class':'permalink toolTipElement'}).get('href'))\n #print permalink_url\n if permalink_url.startswith('http://reviews.cnet.com'):\n permalink_url = permalink_url.replace('http://reviews.cnet.com','')\n if self.fetch_next_review:\n self.__getReview(self.task.instance_data['uri'],permalink_url)\n else:\n self.fetch_next_link = False\n break\n if self.fetch_next_link:\n self.soup= search_page_soup\n #next_link = [stripHtml(each.find('a').get('href')) for each in self.soup.find('ul',attrs={'class':'pagination'}).findAll('li') if each.find('a') and each.find('a').renderContents()==\"next\" ][0]\n next_link = [x.get('href') for x in self.soup.findAll('a', 'nextButton')][0]\n if next_link.startswith('http://reviews.cnet.com'):\n next_link = next_link.replace('http://reviews.cnet.com','')\n self.currenturi = self._createSiteUrl(\"http://reviews.cnet.com%s\"%next_link)\n if self.currenturi not in self.next_url_links:\n self.next_url_links.append(self.currenturi)\n res=self._getHTML(self.currenturi)\n if res: \n self.rawpage=res['result']\n self._setCurrentPage()\n else:\n log.debug(self.log_msg(\"could not set the next url link\"))\n self.fetch_next_link = False\n self.fetch_next_review = False\n break\n else:\n log.critical(self.log_msg(\"Duplicate next url link\"))\n self.fetch_next_link = False\n self.fetch_next_review = False\n break\n return True\n except:\n log.exception(self.log_msg(\"Exception occured in __iterateReviews\"))\n self.fetch_next_link = False\n self.fetch_next_review = False\n return False", "def parse_pgp_profile_page(self):\n url = '{}/profile/{}'.format(BASE_URL, self.hu_id)\n profile_page = requests.get(url)\n\n assert profile_page.status_code == 200\n\n profile_soup = BeautifulSoup(profile_page.text, 'lxml')\n\n genome_file_links = self.parse_uploaded_div(profile_soup)\n surveys = self.parse_survey_div(profile_soup)\n\n return genome_file_links, surveys, url", "def data_from_reviews(base_url):\n\n # COMPLETE 1 OF 2 FILL-INS IN THE WORKING URL\n for die in range(1, 7):\n \n # COMPLETE 2 0F 2 FILL-INS IN THE WORKING URL\n for page in inftyrage():\n url = base_url.format(die, page)\n \n soup = BeautifulSoup(get(url).text, 'lxml')\n \n # CHECK IF WE HAVE MOVED PAST THE FINAL PAGE, BY GETTING ERROR404 \n status = soup.find('body', attrs = {'class':'error404'})\n if status is not None:\n break\n \n # GET ALL MEDIA (MOVIES/SERIES/GAMES) ON PAGE\n media = soup.find_all('article')\n\n for article in media:\n \n # GET ARTICLE 
URL FOR RETRIEVING FACTS\n url = article.find('h2').a['href']\n\n # GET FACTS\n data = get_facts(url)\n data['terningkast'] = die\n yield data", "def review(self, request, access_type,\n page_name=None, params=None, **kwargs):\n\n try:\n entity = self._logic.getFromKeyFieldsOr404(kwargs)\n except out_of_band.Error, error:\n return helper.responses.errorResponse(\n error, request, template=params['error_public'])\n\n # get the context for this webpage\n context = responses.getUniversalContext(request)\n responses.useJavaScript(context, params['js_uses_all'])\n context['page_name'] = '%s \"%s\" from %s' % (page_name, entity.title,\n entity.scope.name())\n context['entity'] = entity\n context['entity_type'] = params['name']\n context['entity_type_url'] = params['url_name']\n if self._show_review_not_appeared_msg:\n context['header_msg'] = self.DEF_REVIEW_NOT_APPEARED_MSG\n self._show_review_not_appeared_msg = False\n\n # get the roles important for reviewing an application\n filter = {\n 'user': user_logic.logic.getCurrentUser(),\n 'scope': entity.org,\n 'status': 'active'\n }\n\n org_admin_entity = org_admin_logic.logic.getForFields(filter, unique=True)\n mentor_entity = mentor_logic.logic.getForFields(filter, unique=True)\n\n # decide which form to use\n if org_admin_entity:\n # create a form for admin review\n # it is done here, because of the dynamic choices list for the mentors\n\n # firstly, get the list of all possible mentors to assign\n choices = []\n choices.append(('', 'No mentor'))\n\n # prefer those mentors who volunteer to mentor this proposal \n filter = {\n '__key__': entity.possible_mentors\n }\n order = ['name_on_documents']\n possible_mentors = mentor_logic.logic.getForFields(filter, order=order)\n for mentor in possible_mentors:\n choices.append((mentor.link_id, mentor.document_name()))\n\n # also list the rest of the mentors\n filter = {\n 'scope': entity.org\n }\n all_mentors = mentor_logic.logic.getForFields(filter, order=order)\n for mentor in all_mentors:\n if mentor.key() in entity.possible_mentors:\n continue\n choices.append((mentor.link_id, mentor.document_name()))\n\n dynafields = [\n {'name': 'rank',\n 'base': forms.IntegerField,\n 'label': 'Set to rank',\n 'help_text':\n 'Set this proposal to the given rank (ignores the given score)',\n 'min_value': 1,\n 'required': False,\n 'passthrough': ['min_value', 'required', 'help_text'],\n },\n {'name': 'mentor',\n 'base': forms.ChoiceField,\n 'passthrough': ['initial', 'required', 'choices'],\n 'label': 'Assign Mentor',\n 'choices': choices,\n 'required': False,\n 'help_text': 'Choose the mentor you would like to assign to this '\n 'Proposal. 
Choose \"No mentor\" if you don\\'t want any '\n 'mentor assigned.'\n },\n ]\n\n dynaproperties = params_helper.getDynaFields(dynafields)\n dynaproperties['clean_comment'] = cleaning.clean_html_content('comment')\n\n form = dynaform.extendDynaForm(\n dynaform=params['mentor_review_form'], \n dynaproperties=dynaproperties)\n\n else:\n # the current user is not an org admin\n if entity.org.scoring_disabled:\n # reviews are disabled, don't show score field\n form = params['locked_review_form']\n else:\n # reviews are enabled, show the score field\n form = params['mentor_review_form']\n\n if request.method == 'POST':\n return self.reviewPost(request, context, params, entity,\n form, org_admin_entity, mentor_entity, **kwargs)\n else:\n # request.method == 'GET'\n return self.reviewGet(request, context, params, entity,\n form, org_admin_entity, mentor_entity, **kwargs)", "def test_get_professor_detail(self):\n url = reverse('institute-professors-detail',\n args=['IC', 'Pedro Rezende'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def scrape_user_reviews(movies):\n user_reviews = []\n for movie in movies:\n review_count = 0\n review_movie_rank = movie[1]\n review_movie = movie[2]\n review_url = movie[6]\n # form the proper url\n review_url = f\"https://www.imdb.com/{review_url}reviews?sort=reviewVolume&dir=desc&ratingFilter=0\"\n # sleep for random time to avoid IP Block\n # sleep(randint(1, 5))\n response = requests.get(review_url).text\n soup = BeautifulSoup(response, 'lxml')\n\n for review_container in soup.find_all('div', class_='imdb-user-review'):\n review_meta = review_container.find('div', class_='display-name-date')\n review_title = review_container.a.text.strip('\\n')\n review_date = review_container.find('span', class_='review-date').text\n reviewer_rating = review_container.find('div', class_='ipl-ratings-bar')\n if reviewer_rating == None:\n reviewer_rating = ''\n else:\n reviewer_rating = reviewer_rating.text.strip('\\n')\n reviewer = review_meta.a.text\n review_content = review_container.find('div', class_='content').div.text\n review = (\n review_count,\n review_movie,\n review_movie_rank,\n review_title,\n reviewer_rating,\n reviewer,\n review_date,\n review_content\n )\n review_count += 1\n print(review_movie, review_count)\n user_reviews.append(review)\n return user_reviews", "def requestReviewInfoUnOfficial(businessID):\n reviewList = []\n #7 variables recorded from each review\n #reviewList.append([\"name\",\"location\",\"friend-count\",\"review-count\",\"photo-count\", \"elite-year\", \"rating\",\"date\",\"comment\"])\n #url for first page\n url = \"https://www.yelp.com/biz/{0}?sort_by=date_desc&start=0\".format(businessID)\n page = requests.get(url)\n #Uses beautifulsoup library to retrieve page and parsers html tree\n soup = BeautifulSoup(page.content, 'html.parser')\n #finds number of review pages to iterate through for the individual restaurant \n pageNum = getPageNumber(soup)\n print(\"{0} Number of pages: {1}\".format(businessID,pageNum))\n #increments of 20, each review page for a restaurant contains 20 reviews per a page\n for i in range(0,40,20): #currently only looking at first 2 pages since database already exists and program now justs updates. 
\n print(i)\n if i != 0: #for all pages that follow, must update soup\n url = \"https://www.yelp.com/biz/{0}?sort_by=date_desc&start={1}\".format(businessID,i)\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #finds div with list of reviews, list is further organized as an array of divs \n reviewers = soup.find_all('div', class_= \"review review--with-sidebar\")\n numReviews = len(reviewers)\n \n for i in range(numReviews):#iterates through list of reviews organized by divs\n review = getSingleReview(reviewers[i])\n reviewList.append(review)\n \n return reviewList" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes the corpus filename from a professor's name and their page url.
def make_filename(prof_name, prof_url): tid = extract_prof_id(prof_url) prof_name_id = '_'.join(prof_name.split()) return PATH_TO_CORPUS + '{}__{}.txt'.format(prof_name_id, tid)
[ "def make_name(linkurl, topic):\n fileRegex = re.compile(r'^(.*)/(20\\d\\d)/(\\d\\d)/(\\d\\d)/(.*)$')\n mo = fileRegex.search(linkurl)\n date_part = mo.group(2)+'-'+mo.group(3)+'-'+mo.group(4)\n gen_name = date_part+'-'+topic+'-'+mo.group(5)+'.html'\n filename = os.path.join(POSTSDIR, gen_name)\n return filename", "def create_filename(self, title):\n slug = slugify(title)\n if slug in self.slugs:\n slug = slug + '_'\n if len(slug) > 100:\n slug = slug[0:100]\n self.slugs.append(slug)\n return slug + '.html'", "def _make_filename(url):\r\n # This is super naive.\r\n # Todo: Make filename when the crawler return per site\r\n # Todo: Make random filename if needed\r\n filename = url.split(\"/\")[-1]\r\n log.debug(\"Making filename: %s -> %s\", url, filename)\r\n return filename", "def page_url_to_filename( url ):\n\n filename = url[ 30 : -1 ].replace('/page', '_page') + '.html'\n\n return filename", "def _title_to_filename(title, prefix=None):\n title_part = \"-\".join(re.sub(\"[^0-9a-z]\", \" \", title.lower()).split())\n if prefix:\n prefix_part = \"-\".join(re.sub(\"[^0-9a-zA-Z]\", \" \", prefix).split())\n return f\"{prefix_part}-{title_part}.html\"\n return f\"{title_part}.html\"", "def generate_file_name(section, lab, s_name):\n s_name = s_name.lower().split(\" \") # [FirstName, LastName]\n return \"sec{0}_lab{1}_{2}-{3}.txt\".format(section, lab, s_name[0],\n s_name[1])", "def reaction_url_to_filename( url ):\n\n s = url.split( '/' )\n\n post_id = s[ 4 ]\n page = s[ -1 ].split( '=' )[ -1 ]\n\n filename = f'{post_id}_page-{page}.html'\n\n return filename", "def _pmf_doc_name(doc):\n return 'proceedings-{number}-{slug}'.format(\n number=doc.factory_parent.meeting.number,\n slug=xslugify(doc.factory_parent.type.slug).replace(\"_\", \"-\")[:128]\n )", "def classifier_document_name(self, content):\n return translit(''.join([char for char in content if char.isalpha()]) + '.txt', reversed=True)", "def get_filename(self):\n\n return \"-\".join([\n str(self.paper.module.code),\n str(self.paper.year_start),\n str(self.paper.year_stop),\n str(self.paper.sitting),\n PaperPDF.period_map[self.paper.period]]\n ) + \".pdf\"", "def make_pdf_filename(paper, pdfcontent=None):\n if paper.title in [\"\", None]:\n if pdfcontent:\n paper.title = make_hash(pdfcontent)\n else:\n paper.title = make_random_string()\n\n pdf_filename = \"{}.pdf\".format(paper.title)\n\n # don't create directories\n pdf_filename = pdf_filename.replace(\"/\", \"_\")\n\n return pdf_filename", "def nameFile(tab, ext):\n name = str(tab[0]).split(u'/')[-1].split(u',')[0].split(u'_')[:-1]\n if ext == u'.art' :\n nametxt = u'articles_'+u'_'.join(name)+u'.art'\n return nametxt\n elif ext == u'.pickle' :\n namepickle = u'articles_'+u'_'.join(name)+u'.pickle'\n return namepickle", "def get_output_name_primer_pair(primer_pair,\n output_dir):\n \n if not output_dir.endswith('/'):\n output_dir += '/'\n \n forward_name = basename(primer_pair[0]).split('_')[0] + \"_\"\n reverse_name = basename(primer_pair[1]).split('_')[0] + \"_\"\n \n amplicons_fp = output_dir + forward_name + reverse_name + \"amplicons.fasta\"\n \n return amplicons_fp", "def jp_article_uri(filename):\n\ti = filename.rfind('.') \n\tjust_name = alphaNumeric(filename[:i],'_')\n\treturn 'article/jpl/'+just_name", "def name_the_output_file(data):\n\n # Parse the input data as a URL\n parsed_data = urllib.parse.urlparse(data)\n\n # Use the path component of the URL\n path = parsed_data.path if parsed_data.path else data\n\n # Use the basename of the path as the output file name\n 
output = os.path.basename(path)\n\n # Set a default file name if the output is empty\n if not output:\n output = \"output\"\n\n return output", "def _get_pdf_filename(thumb_setup, pdf_setup, thumb_filename):\n pdf_base = os.path.basename(thumb_filename).replace(\".\"+thumb_setup.output_fmt, \".\"+pdf_setup.output_fmt)\n return os.path.relpath(os.path.join(pdf_setup.output_dir, pdf_base),\n self.webpage_dir)", "def _guess_corenlp_name(k):\r\n bname = os.path.basename(k.doc)\r\n if bname.startswith('file'):\r\n return None\r\n\r\n corenlp_out_file = bname + '.xml'\r\n return corenlp_out_file", "def _generate_track_filename(self, extention):\n track_filename = ''\n for char in self.title:\n if char in \" -,.;:(){}[]`~'\":\n track_filename += '_'\n else:\n track_filename += char\n\n if extention != '':\n track_filename = f'{track_filename}.{extention}'\n else:\n pass\n\n return track_filename", "def get_filename() -> str:\n filename = input(\"Enter the journal filename: \")\n return filename" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predicts the gender of a professor, given their reviews.
def predict_gender_from_reviews(reviews): m_count = 0 f_count = 0 for r in reviews: if r['text']: toks = r['text'].lower().split() counts = Counter(toks) for mp in MALE_PRONOUNS: if mp in counts: m_count += counts[mp] for fp in FEMALE_PRONOUNS: if fp in counts: f_count += counts[fp] if m_count > f_count: return 'M' if f_count > m_count: return 'F' return 'UNK'
[ "def evaluate_gender_prediction(training_gender_df, \n test_gender_df, print_flag=False):\n f1_scores = []\n method_name = ['Frequency', 'Closest']\n # read the test set for obtaining the gender column (response)\n test_set = pd.read_csv(\"../data/deaths-test.csv\")\n test_set.fillna(value=0, inplace=True)\n y_test = test_set['Gender'].values\n\n print(\"======= GENDER PREDICTION =======\")\n for column in test_gender_df.columns:\n pred = test_gender_df[column]\n # genders = test_gender_df['gender']\n sign_num = int((30 - len(column) - 2 ) / 2)\n f_score = f1_score(y_test, pred)\n f1_scores.append(f_score)\n if print_flag == True:\n print(\"=\"*sign_num, column, \"=\"*sign_num)\n print(confusion_matrix(y_test, pred))\n print(\"f1-score\\t: %.4f\" % f_score)\n\n assert(len(f1_scores) == len(method_name))\n\n # Train a classifier using the features that were previously created \n # from the text books. A few methods that were previously proven to\n # be working better with the data set are selected.\n\n # read the training set for obtaining the gender column\n train_set = pd.read_csv(\"../data/deaths-train.csv\")\n train_set.fillna(value=0, inplace=True)\n y_train = train_set['Gender'].values\n\n cls_scores, cls_mtd_name = self.gender_classifier(training_gender_df,\n y_train, test_gender_df,\n y_test)\n\n f1_scores = f1_scores + cls_scores\n method_name = method_name + cls_mtd_name\n self.plot_f1_scores(method_name, \n f1_scores,\n plot_title=\"Gender Prediction\", \n file_name='gender_prediction')", "def _predict_sex(self):\n self._compute_y_x_gene_ratio()\n logger.debug(\"ratio of detected Y genes to detected X genes: {}\"\n .format(self.data['y_x_gene_ratio']))\n\n self._compute_y_x_count_ratio()\n logger.debug(\"ratio of Y counts to X counts: {}\"\n .format(self.data['y_x_count_ratio']))\n\n possible_eqs={\n 'y_sq_over_tot': '(y_counts^2 / total_counts) > cutoff',\n 'gene_ratio': '(y_genes / x_genes) > cutoff',\n 'counts_ratio': '(y_counts / x_counts) > cutoff'\n }\n equation = possible_eqs[self.sexmodel]\n logger.debug(\"using equation: {}\".format(equation))\n\n if self.sexmodel == 'y_sq_over_tot':\n n_y_sq = float(self.data['y_counts'])**2\n n_tot = float(self.data['total_counts'])\n if n_tot == 0:\n value = n_y_sq\n else:\n value = n_y_sq / n_tot\n\n elif self.sexmodel == 'gene_ratio':\n value = float(self.data['y_x_gene_ratio'])\n\n elif self.sexmodel == 'counts_ratio':\n value = float(self.data['y_x_count_ratio'])\n\n logger.debug(\"value for current sample is {}\"\n .format(value))\n self.data['sexcheck_eqn'] = equation\n self.data['sexcheck_cutoff'] = self.sexcutoff\n\n if value > self.sexcutoff:\n self.data['predicted_sex'] = 'male'\n else:\n self.data['predicted_sex'] = 'female'", "def predict_gender(bfly_rgb, weights='./models/id_gender.pkl'):\n gender = {\n 0: 'female',\n 1: 'male'\n }\n prediction = _classification(bfly_rgb, weights)\n\n return gender.get(prediction)", "def pred_transform(self,pred):\n\n \n gender_label,gender_confidence = np.argmax(pred, axis=1),np.int16(np.max(pred*100, axis=1))\n \n gender_label=np.where(gender_label==0, 'male', gender_label) \n \n gender_label=np.where(gender_label=='1', 'female', gender_label) \n\n results={'gender':{'label':gender_label.tolist(),\n\n 'confidence':gender_confidence.tolist()}}\n return results", "def predict_rating(self, movie):\n\n other_ratings = movie.ratings\n\n similarities = [\n (self.similarity(r.user), r)\n for r in other_ratings\n ]\n\n similarities.sort(reverse=True)\n\n similarities = [(sim, r) for sim, r in 
similarities if sim > 0]\n\n if not similarities:\n return None\n\n numerator = sum([r.score * sim for sim, r in similarities])\n denominator = sum([sim for sim, r in similarities])\n\n return numerator/denominator\n\n\n #this is the one we wrote", "def predict_rating(review_text, classifier, vectorizer, decision_threshold=0.5):\n\t\n\treview_text = preprocess_text(review_text)\n\treview_vector_np = vectorizer.vectorize(review_text)\n\treview_vector = torch.from_numpy(review_vector_np)\n\tresult = torch.sigmoid(classifier(review_vector.view(1,-1)))\n\tclass_label = None\t\n\tif result.item() < decision_threshold:\n\t\tclass_label = 0\n\telse:\n\t\tclass_label = 1\n\t\t\n\treturn vectorizer.rating_vocab.lookup_index(class_label)", "def predict_rating(user_dict, movie_dict, i, k):\n if i in user_dict.keys() and k in movie_dict.keys():\n prediction = predict_both_known(user_dict, movie_dict, i, k)\n elif i not in user_dict.keys() and k in movie_dict.keys():\n prediction = predict_unknown_user(user_dict, movie_dict, k)\n elif i in user_dict.keys() and k not in movie_dict.keys():\n prediction = predict_unknown_movie(user_dict, movie_dict, i)\n else:\n prediction = predict_both_unknown(user_dict)\n return prediction", "def possessive(self):\n if hasattr(self, 'gender'):\n if self.gender == 'male': \n return 'his'\n elif self.gender == 'female':\n return 'her'\n elif self.gender == 'non-binary':\n return 'their'\n # other gender or no gender specified:\n return 'its'", "def _get_gender(self):\n female = ['female', 'actress', 'women']\n male = ['male', 'actor', 'men']\n full_text = self.soup.get_text().lower()\n count_female = full_text.count(' she ') + full_text.count(' her ')\n count_male = full_text.count(' he ') + full_text.count(' his ')\n\n try:\n #Grabs the text in catlinks id\n catlinks = self.soup.find(id='catlinks').text.lower()\n if any(s in catlinks for s in female):\n self.gender = 'F'\n elif any(s in catlinks for s in male):\n self.gender = 'M'\n else:\n try:\n ratio_male = float(count_male) / float(count_female)\n except:\n ratio_male = 1\n if ratio_male > 2:\n self.gender = 'M'\n elif ratio_male < 0.5:\n self.gender = 'F'\n else:\n self.gender = None\n except:\n self.gender = None", "def scorepersentence(reviews):\n vs = []\n for sentence in reviews:\n vs.append(analyzer.polarity_scores(sentence))\n return vs", "def get_movie_genders_dict(movie):\n imdb_pred_dict = predict_gender_imdb(movie, alignment_fn=in_align, assignment_fn=soft_backtrack)\n ssa_pred_dict = predict_gender_ssa(SSA_DICT, movie, mode='hard', check_decade=True)\n pred_dict = _merge_dict(ssa_pred_dict, imdb_pred_dict, True)\n ordered_snames = sorted(list(pred_dict.keys()))\n return pred_dict", "def labelGender(tweet, males, females):\n #name = tweet['user']['name'].lower().split()\n name = tweet.lower().split()\n if len(name) == 0:\n name = ['']\n name = re.findall('\\w+', name[0])\n if len(name) == 0:\n name = ''\n else:\n name = name[0]\n if name in males:\n return 'm'\n tweet['user']['gender'] = 'm'\n elif name in females:\n return 'f'\n tweet['user']['gender'] = 'f'\n else:\n return 'n'\n tweet['user']['gender'] = 'n'\n return tweet", "def predict(user_id, movie_id):\n print_user_info(user_id)\n print_movie_info(movie_id)\n print_actual_rating(user_id, movie_id)\n avg = average_rating(movie_id)\n nearest = nearest_neighbour(user_id, movie_id)\n slope = slope_one(user_id, movie_id)\n hybrid_algorithm(avg, nearest, slope)", "def get_gender(dataset = \"all\"):\r\n\treturn process_main(get_status_statistic, 
\"get_gender\", dataset, \"gender\", tuple())", "def find_gender(self, ocr_text):\r\n \r\n split_ocr = ocr_text.split('\\n')\r\n #print(\"Split data:\",split_ocr)\r\n #split_ocr = split_ocr.split(' ')\r\n #print(\"Split data:\",split_ocr)\r\n text1=[]\r\n for i in split_ocr:\r\n text1+=i.split(\" \")\r\n \r\n #print(text1)\r\n \r\n #print(\"Split data:\",split_ocr)\r\n \r\n if 'Male' in text1 or 'MALE' in text1:\r\n GENDER = 'Male'\r\n elif 'Female' in text1 or 'FEMALE' in text1:\r\n GENDER = 'Female'\r\n else:\r\n GENDER = 'NAN'\r\n return GENDER", "def genderFloat(gender):\n\tif gender == \"Male\":\n\t\treturn 0\n\telif gender == \"Female\":\n\t\treturn 1\n\telse:\n\t\treturn 2", "def review(self, rated, recommended):", "def classify_recommendation(entry):\n global recommend_clf\n return recommend_clf.predict([entry])[0]", "def predict_paid_or_unpaid(years_experience):\n if years_experience < 3.0:\n return \"paid\"\n elif years_experience < 8.5:\n return \"unpaid\"\n else:\n return \"paid\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collects the URLs of all schools in the U.S. on Rate My Professor. Saved in school2id.pkl.
def collect_schools(): MIN_OFFSET = 0 MAX_OFFSET = 6700 STEP_SIZE = 20 school2id = {} num_failed = 0 for offset in np.arange(MIN_OFFSET, MAX_OFFSET+STEP_SIZE, step=STEP_SIZE): if offset % 100 == 0: print(offset) url = DOMAIN + '/search.jsp?query=&queryoption=HEADER&stateselect=&country=united+states&dept=&queryBy=schoolName&facetSearch=&schoolName=&offset={}&max=20'.format(offset) r = requests.get(url) soup = BeautifulSoup(r.content, 'html.parser') schools = soup.find_all('li', attrs={'class':'listing SCHOOL'}) for s in schools: try: link = s.find('a') school_id = int(link['href'].split('=')[-1]) name = link.find('span', attrs={'class':'listing-name'}).find('span', attrs={'class':'main'}).text school2id[name] = school_id except: print('Failed:', s.text.strip()) num_failed += 1 print('Num schools found:', len(school2id)) for s in school2id: if 'Columbia' in s: print(s, school2id[s]) pickle.dump(school2id, open('../rate_my_prof/school2id.pkl', 'wb'))
[ "def college_transfer_scrape(schools):\n \n #links =\n\n #for i, link in enumerate(links):\n # soup = make_soup(link)\n \n # for item in soup.findAll():\n # stuff = ''\n \n # schools[i]['item'] = stuff\n \n return schools", "def college_data_scrape(schools, links):\n \n #links =\n\n #for i, link in enumerate(links):\n # soup = make_soup(link)\n \n # for item in soup.findAll():\n # stuff = ''\n \n # schools[i]['item'] = stuff\n \n return schools", "def collect_professors_per_school(only_take_top_20):\n school2id = pickle.load(open('../rate_my_prof/school2id.pkl', 'rb'))\n sorted_schools = sorted(list(school2id.keys()))\n print(len(sorted_schools))\n school2info = {}\n driver = prep_query_by_school_driver()\n total_num_profs = 0\n total_num_prof_pages = 0\n for i, school in enumerate(sorted_schools):\n try:\n sid = school2id[school]\n num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=only_take_top_20)\n total_num_profs += num_profs\n total_num_prof_pages += len(prof_pages)\n school = school.strip()\n school2info[school] = (sid, num_profs, prof_pages)\n pickle.dump(school2info, open('../rate_my_prof/school2info.pkl', 'wb'))\n print('{}. School: {}. Num CS profs: {} -> SUCCESS'.format(i, school, num_profs, len(prof_pages)))\n except Exception as e:\n print('{}. School: {} -> FAILED'.format(i, school), e)\n driver.quit()\n print('Processed {} schools'.format(len(school2info)))\n print('{} CS profs in total'.format(total_num_profs))\n print('{} prof pages collected'.format(total_num_prof_pages))", "def bing_search(schools, website): \n web_links = []\n \n for school in schools:\n NEW_URL = school['name'] + ' site:' + website\n print NEW_URL\n web_links.append(bing_search2(NEW_URL, 'Web'))\n \n return web_links", "def generateScoreboardList():\r\n urls = []\r\n yr = 2019\r\n while yr < 2019:\r\n for week in range(1, 18):\r\n url = f\"https://www.espn.com/nfl/scoreboard/_/year/{yr}/seasontype/2/week/{week}\"\r\n urls.append(url)\r\n yr += 1\r\n for week in range(1, 5):\r\n url = f\"https://www.espn.com/nfl/scoreboard/_/year/{yr}/seasontype/2/week/{week}\"\r\n urls.append(url)\r\n return urls", "def educations(self):\n schools = []\n if len(self.xp_educations) > 0:\n for school in self.xp_educations:\n data = {}\n data['university_name'] = extract_one(self.get_xp(school,\n './/h4[@class=\"item-title\"]//text()'))\n data['linkedin_university_url'] = extract_one(self.get_xp(school,\n './/h4[@class=\"item-title\"]/a/@href'))\n data['linkedin_university_img_url'] = extract_one(self.get_xp(school,\n './/h5[@class=\"logo\"]/a/img/@src'))\n data['description'] = extract_one(self.get_xp(\n school, './/h5[@class=\"item-subtitle\"]//text()'))\n if data['description'] is not None:\n data['degree'] = get_list_i(data['description'].split(','), 0)\n data['major'] = get_list_i(data['description'].split(','), 1)\n else:\n data['degree'] = None\n data['major'] = None\n start_date = self.get_xp(\n school, './/span[@class=\"date-range\"]/time[1]/text()')\n end_date = self.get_xp(\n school, './/span[@class=\"date-range\"]/time[2]/text()')\n data['start_date'] = extract_one(start_date)\n if end_date:\n data['end_date'] = extract_one(end_date)\n else:\n data['end_date'] = None\n schools.append(data)\n if not schools and self.code_data:\n code_educations = self.code_data[\n 'com.linkedin.voyager.identity.profile.Education'].values()\n for education in code_educations:\n data = {}\n data['university_name'] = education.get('schoolName')\n data['description'] = education.get('description')\n 
data['degree'] = education.get('degreeName')\n data['major'] = education.get('fieldOfStudy')\n data.update(self.get_dates_from_time_period(education))\n schools.append(data)\n today = time.strftime('%Y-%m-%d')\n schools.sort(key=lambda x: (x.get('end_date', today),\n x.get('start_date', '0')),\n reverse=True)\n return schools", "def generate_courses_urls_list(soup):\n return [anchor['href'] for anchor in soup.find_all('a', 'wrblue')]", "def schools():\n return (\n db_session.query(School)\n .filter(School.building_code.in_(school_codes))\n )", "def edit_professors_per_school():\n driver = prep_query_by_school_driver()\n fn = '../1.rate_my_prof/school2info.pkl'\n school2info = pickle.load(open(fn, 'rb'))\n missing_before = 0\n missing_now = 0\n for school, (sid, num_profs, prof_pages) in school2info.items():\n if len(prof_pages) < num_profs:\n missing_before += num_profs - len(prof_pages)\n try:\n num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=False)\n print('{} -> got {} out of {}'.format(school, len(prof_pages), num_profs))\n missing_now += num_profs - len(prof_pages)\n school2info[school] = (sid, num_profs, prof_pages)\n except:\n print('Failed parsing {} -> no change'.format(school))\n missing_now += num_profs - len(prof_pages) # still missing same amount\n print('Missing {} profs before, missing {} profs now'.format(missing_before, missing_now))\n pickle.dump(school2info, open(fn, 'wb'))", "def create_all_yr_urls():\n url_lst = []\n url = 'https://web.archive.org/web/20110622061736/http://content.usatoday.com:80/sportsdata/football/nfl/%s/salaries/%s'\n for i in np.arange(2000,2018):\n #Internet ARchive when 2000 <= i <=2009\n if i < 2010:\n url = 'https://web.archive.org/web/20110622061736/http://content.usatoday.com:80/sportsdata/football/nfl/%s/salaries/%s'\n if i == 2002:\n int_arch_teams.append('Texans')\n url_lst.append(create_int_arch_url(url, int_arch_teams, i))\n elif i > 2002:\n url_lst.append(create_int_arch_url(url, int_arch_teams, i))\n # for team in int_arch_teams:\n # url = 'https://web.archive.org/web/20110622061736/http://content.usatoday.com:80/sportsdata/football/nfl/%s/salaries/%s' % (team, i)\n # print(url)\n else:\n try:\n int_arch_teams.remove('Texans')\n except ValueError:\n url_lst.append(create_int_arch_url(url, int_arch_teams, i))\n elif i == 2010:\n continue\n else:\n #spotrac website for years 2011 - 2017\n url = 'https://www.spotrac.com/nfl/%s/cap/%s/'\n if i < 2016:\n url_lst.append(create_int_arch_url(url, int_arch_teams, i))\n elif i == 2016:\n try:\n spotrac_teams.remove('st.-louis-rams')\n except ValueError:\n spotrac_teams.append('los-angeles-rams')\n spotrac_teams.append('los-angeles-rams')\n url_lst.append(create_int_arch_url(url, int_arch_teams, i))\n else:\n try:\n spotrac_teams.remove('san-diego-chargers')\n except ValueError:\n url_lst.append(create_int_arch_url(url, int_arch_teams, i))\n continue\n spotrac_teams.append('los-angeles-chargers')\n url_lst.append(create_int_arch_url(url, int_arch_teams, i))\n # create_int_arch_url(url, int_arch_teams, i)\n flat_url_lst = [item for sublist in url_lst for item in sublist]\n return flat_url_lst", "def get_professors_from_school(driver, school_id, only_take_top_20 = False):\n url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(school_id)\n driver.get(url)\n num_professors = int(driver.find_element_by_xpath(\"//span[@class='professor-count']\").text)\n if num_professors == 0:\n return num_professors, []\n if 
only_take_top_20 or num_professors < 20:\n return num_professors, get_current_list_of_professors(driver)\n results = []\n letter_filters = driver.find_elements_by_xpath(\"//a[@class='result']\")\n for filter in letter_filters:\n filter_text = filter.text.strip()\n if filter_text != 'ALL':\n filter.click()\n time.sleep(.05)\n results += get_current_list_of_professors(driver)\n results = set(results)\n return num_professors, results", "def get_rankings(section_url, schools, rankings):\n soup = make_soup(section_url)\n \n for item in soup.findAll('span', 'rankscore-bronze'):\n rank = item.text.encode('ascii', 'ignore')\n rank = int(rank.translate(None, '#'))\n rankings.append(rank)\n for item in soup.findAll('a', 'school-name'):\n school = item.text.encode('ascii', 'replace').replace('?', ' ')\n school = school.replace('\\\\u200b', ' ').replace('\\\\u2014', ' ')\n schools.append(school)\n return [schools, rankings]", "def results(request):\n\n prop_data = request.session.get('prop')\n schools = GreatSchools(\n prop_data['address'], prop_data['city'], prop_data['state'], prop_data['zip_code'], prop_data['county'])\n schools.set_greatschool_urls()\n if schools.api_key and schools.DAILY_API_CALL_COUNT <= 2950:\n for url in schools.urls:\n schools.get_greatschool_xml(url)\n\n else:\n schools.elem_school = 'Unknown'\n schools.mid_school = 'Unknown'\n schools.high_school = 'Unknown'\n prop = PropSetup(prop_data['address'])\n for key in prop_data.keys():\n prop.__dict__[key] = prop_data[key]\n\n context = {\n 'address': prop.address,\n 'taxes': '$' + str(int(int(prop.taxes) / 12)),\n 'hoa': '$' + str(int(int(prop.hoa) / 12)),\n 'rent': '$' + str(prop.rent),\n 'vacancy': '$' + str(prop.vacancy_calc),\n 'oper_income': '$' + str(prop.oper_inc_calc),\n 'total_mortgage': '$' + str(prop.total_mortgage_calc),\n 'down_payment_percentage': str(prop.down_payment_percentage) + '%',\n 'down_payment': '$' + str(prop.down_payment_calc),\n 'curr_value': '$' + str(prop.curr_value),\n 'init_cash_invest': '$' + str(prop.init_cash_invested_calc),\n 'oper_exp': '$' + str(prop.oper_exp_calc),\n 'net_oper_income': '$' + str(prop.net_oper_income_calc),\n 'cap_rate': '{0:.1f}%'.format(prop.cap_rate_calc * 100),\n 'initial_market_value': '$' + str(prop.curr_value),\n 'interest_rate': str(prop.interest_rate) + '%',\n 'mort_payment': '$' + str(prop.mort_payment_calc),\n 'sqft': prop.sqft,\n 'closing_costs': '$' + str(prop.closing_costs),\n 'initial_improvements': '$' + str(prop.initial_improvements),\n 'cost_per_sqft': '$' + str(prop.cost_per_sqft_calc),\n 'insurance': '$' + str(int(int(prop.insurance) / 12)),\n 'maintenance': '$' + str(int(int(prop.maint_calc) / 12)),\n 'prop_management_fee': '$' + str(prop.prop_management_fee),\n 'utilities': '$' + str(prop.utilities),\n 'tenant_placement_fee': '$' + str(int(int(prop.tenant_place_calc) / 12)),\n 'resign_fee': '$' + str(int(int(prop.resign_calc) / 12)),\n 'notes': prop.notes,\n 'pub_date': timezone.now,\n 'rtv': '{0:.2f}%'.format(prop.rtv_calc * 100),\n 'cash_flow': '$' + str(prop.cash_flow_calc),\n 'oper_exp_ratio': '{0:.1f}'.format(prop.oper_exp_ratio_calc * 100) + '%',\n 'debt_coverage_ratio': prop.debt_coverage_ratio_calc,\n 'cash_on_cash': '{0:.2f}%'.format(prop.cash_on_cash_calc * 100),\n 'elem_school': schools.elem_school,\n 'elem_school_score': schools.elem_school_score,\n 'mid_school': schools.mid_school,\n 'mid_school_score': schools.mid_school_score,\n 'high_school': schools.high_school,\n 'high_school_score': schools.high_school_score,\n 'year_built': 
prop.year_built,\n 'county': prop.county,\n 'nat_disasters': 'Unknown',\n 'listing_url': prop.listing_url,\n 'beds': prop.beds,\n 'baths': prop.baths,\n 'livability': prop.areavibes_dict['livability'],\n 'crime': prop.areavibes_dict['crime'],\n 'cost_of_living': prop.areavibes_dict['cost_of_living'],\n 'schools': prop.areavibes_dict['schools'],\n 'employment': prop.areavibes_dict['employment'],\n 'housing': prop.areavibes_dict['housing'],\n 'weather': prop.areavibes_dict['weather'],\n 'disaster1_type': prop.disaster_dict['1'][0],\n 'disaster1_date': prop.disaster_dict['1'][1],\n 'disaster1_county': prop.disaster_dict['1'][2],\n 'disaster1_url': prop.disaster_dict['1'][4],\n 'disaster1_title': prop.disaster_dict['1'][5],\n 'disaster2_type': prop.disaster_dict['2'][0],\n 'disaster2_date': prop.disaster_dict['2'][1],\n 'disaster2_county': prop.disaster_dict['2'][2],\n 'disaster2_url': prop.disaster_dict['2'][4],\n 'disaster2_title': prop.disaster_dict['2'][5],\n 'disaster3_type': prop.disaster_dict['3'][0],\n 'disaster3_date': prop.disaster_dict['3'][1],\n 'disaster3_county': prop.disaster_dict['3'][2],\n 'disaster3_url': prop.disaster_dict['3'][4],\n 'disaster3_title': prop.disaster_dict['3'][5],\n 'disaster4_type': prop.disaster_dict['4'][0],\n 'disaster4_date': prop.disaster_dict['4'][1],\n 'disaster4_county': prop.disaster_dict['4'][2],\n 'disaster4_url': prop.disaster_dict['4'][4],\n 'disaster4_title': prop.disaster_dict['4'][5],\n 'disaster5_type': prop.disaster_dict['5'][0],\n 'disaster5_date': prop.disaster_dict['5'][1],\n 'disaster5_county': prop.disaster_dict['5'][2],\n 'disaster5_url': prop.disaster_dict['5'][4],\n 'disaster5_title': prop.disaster_dict['5'][5],\n }\n request.session['PROP'] = prop.__dict__\n return render(request, 'app/results.html', context)", "def get_schools(self):\n subordinate_dict = self.get_subordinate_organizations()\n schools = []\n for division in subordinate_dict.values():\n schools.extend(division)\n return schools", "def _get_all_url(cls) -> str:", "def teachers_schools():\n class_school = db.session.query(TeacherSchool.teacher_school_id,\n School.name, Teacher.teacher_name).join(School).join(Teacher).all()\n \n return class_school", "def get_lessons(self):\n lessons = []\n for item in self.student.student_class.lessons.all():\n lesson = (str(item.period),\n str(item.unit),\n str(item.venue),\n item.get_type_display(),\n str(item.lecturer) or \"\")\n\n self.lessons.update(\n {\"\\n\".join([lesson[0], lesson[1]]): item.pk}\n )\n\n lessons.append(lesson)\n\n return lessons", "def prep_query_by_school_driver():\n driver = webdriver.Chrome(os.path.join(os.getcwd(), 'chromedriver'))\n columbia_url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(COLUMBIA_ID)\n driver.get(columbia_url)\n driver.find_element_by_class_name('close-this').click()\n dept_input = driver.find_element_by_xpath(\"//input[@placeholder='Enter Your Department']\")\n dept_input.send_keys('Computer Science')\n cs_option = driver.find_element_by_xpath(\"//li[@data-value='Computer Science']\")\n cs_option.click()\n return driver", "def extract_url(data):\n \n table, semifinal_teams = extract_teams(data)\n wiki = find_articles(str(table)) # Finds all the wikipedia links of the teams\n wiki = list(dict.fromkeys(wiki)) # Removes all duplicate elements\n wiki = '\\n'.join(wiki) # Creates spaces between each list element\n team_urls = []\n \n for team in semifinal_teams:\n # Extracts all the team urls\n regex = 
'^(https:\\/\\/en\\.[\\w]+\\.[\\w]+\\/wiki\\/\\d{4}%[\\w]+%\\d{2}%\\d{4}_(Los_Angeles_)?' + team + '[\\w]+)$' \n \n temp = re.findall(regex, wiki, re.M)\n if temp != []:\n for t in temp: \n team_urls.append(''.join(t[0]))\n \n \n return team_urls, semifinal_teams" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collects the list of CS professor pages per school. Saved in school2info.pkl.
def collect_professors_per_school(only_take_top_20): school2id = pickle.load(open('../rate_my_prof/school2id.pkl', 'rb')) sorted_schools = sorted(list(school2id.keys())) print(len(sorted_schools)) school2info = {} driver = prep_query_by_school_driver() total_num_profs = 0 total_num_prof_pages = 0 for i, school in enumerate(sorted_schools): try: sid = school2id[school] num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=only_take_top_20) total_num_profs += num_profs total_num_prof_pages += len(prof_pages) school = school.strip() school2info[school] = (sid, num_profs, prof_pages) pickle.dump(school2info, open('../rate_my_prof/school2info.pkl', 'wb')) print('{}. School: {}. Num CS profs: {} -> SUCCESS'.format(i, school, num_profs, len(prof_pages))) except Exception as e: print('{}. School: {} -> FAILED'.format(i, school), e) driver.quit() print('Processed {} schools'.format(len(school2info))) print('{} CS profs in total'.format(total_num_profs)) print('{} prof pages collected'.format(total_num_prof_pages))
[ "def collect_schools():\n MIN_OFFSET = 0\n MAX_OFFSET = 6700\n STEP_SIZE = 20\n school2id = {}\n num_failed = 0\n for offset in np.arange(MIN_OFFSET, MAX_OFFSET+STEP_SIZE, step=STEP_SIZE):\n if offset % 100 == 0: print(offset)\n url = DOMAIN + '/search.jsp?query=&queryoption=HEADER&stateselect=&country=united+states&dept=&queryBy=schoolName&facetSearch=&schoolName=&offset={}&max=20'.format(offset)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n schools = soup.find_all('li', attrs={'class':'listing SCHOOL'})\n for s in schools:\n try:\n link = s.find('a')\n school_id = int(link['href'].split('=')[-1])\n name = link.find('span', attrs={'class':'listing-name'}).find('span', attrs={'class':'main'}).text\n school2id[name] = school_id\n except:\n print('Failed:', s.text.strip())\n num_failed += 1\n print('Num schools found:', len(school2id))\n for s in school2id:\n if 'Columbia' in s:\n print(s, school2id[s])\n pickle.dump(school2id, open('../rate_my_prof/school2id.pkl', 'wb'))", "def edit_professors_per_school():\n driver = prep_query_by_school_driver()\n fn = '../1.rate_my_prof/school2info.pkl'\n school2info = pickle.load(open(fn, 'rb'))\n missing_before = 0\n missing_now = 0\n for school, (sid, num_profs, prof_pages) in school2info.items():\n if len(prof_pages) < num_profs:\n missing_before += num_profs - len(prof_pages)\n try:\n num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=False)\n print('{} -> got {} out of {}'.format(school, len(prof_pages), num_profs))\n missing_now += num_profs - len(prof_pages)\n school2info[school] = (sid, num_profs, prof_pages)\n except:\n print('Failed parsing {} -> no change'.format(school))\n missing_now += num_profs - len(prof_pages) # still missing same amount\n print('Missing {} profs before, missing {} profs now'.format(missing_before, missing_now))\n pickle.dump(school2info, open(fn, 'wb'))", "def college_transfer_scrape(schools):\n \n #links =\n\n #for i, link in enumerate(links):\n # soup = make_soup(link)\n \n # for item in soup.findAll():\n # stuff = ''\n \n # schools[i]['item'] = stuff\n \n return schools", "def college_data_scrape(schools, links):\n \n #links =\n\n #for i, link in enumerate(links):\n # soup = make_soup(link)\n \n # for item in soup.findAll():\n # stuff = ''\n \n # schools[i]['item'] = stuff\n \n return schools", "def educations(self):\n schools = []\n if len(self.xp_educations) > 0:\n for school in self.xp_educations:\n data = {}\n data['university_name'] = extract_one(self.get_xp(school,\n './/h4[@class=\"item-title\"]//text()'))\n data['linkedin_university_url'] = extract_one(self.get_xp(school,\n './/h4[@class=\"item-title\"]/a/@href'))\n data['linkedin_university_img_url'] = extract_one(self.get_xp(school,\n './/h5[@class=\"logo\"]/a/img/@src'))\n data['description'] = extract_one(self.get_xp(\n school, './/h5[@class=\"item-subtitle\"]//text()'))\n if data['description'] is not None:\n data['degree'] = get_list_i(data['description'].split(','), 0)\n data['major'] = get_list_i(data['description'].split(','), 1)\n else:\n data['degree'] = None\n data['major'] = None\n start_date = self.get_xp(\n school, './/span[@class=\"date-range\"]/time[1]/text()')\n end_date = self.get_xp(\n school, './/span[@class=\"date-range\"]/time[2]/text()')\n data['start_date'] = extract_one(start_date)\n if end_date:\n data['end_date'] = extract_one(end_date)\n else:\n data['end_date'] = None\n schools.append(data)\n if not schools and self.code_data:\n code_educations = self.code_data[\n 
'com.linkedin.voyager.identity.profile.Education'].values()\n for education in code_educations:\n data = {}\n data['university_name'] = education.get('schoolName')\n data['description'] = education.get('description')\n data['degree'] = education.get('degreeName')\n data['major'] = education.get('fieldOfStudy')\n data.update(self.get_dates_from_time_period(education))\n schools.append(data)\n today = time.strftime('%Y-%m-%d')\n schools.sort(key=lambda x: (x.get('end_date', today),\n x.get('start_date', '0')),\n reverse=True)\n return schools", "def get_sections_for_school(self):\n sections = []\n for class_year in SchoolDB.choices.ClassYearNames:\n if self.class_year_took_test(class_year):\n query = Section.all(keys_only=True)\n query.filter(\"organization =\", \n SchoolDB.models.getActiveOrganization())\n query.filter(\"class_year = \", class_year)\n sections.extend(query.fetch(100))\n return sections", "def page_flipper(BASE_URL):\n soup = make_soup(BASE_URL)\n \n schools = []\n rankings = []\n schoolRanks = []\n pageLimit = 4\n index = 1\n \n while index <= pageLimit:\n section_url = BASE_URL + str(index)\n schoolRanks = get_rankings(section_url, schools, rankings)\n index += 1\n \n return schoolRanks", "def get_professors_from_school(driver, school_id, only_take_top_20 = False):\n url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(school_id)\n driver.get(url)\n num_professors = int(driver.find_element_by_xpath(\"//span[@class='professor-count']\").text)\n if num_professors == 0:\n return num_professors, []\n if only_take_top_20 or num_professors < 20:\n return num_professors, get_current_list_of_professors(driver)\n results = []\n letter_filters = driver.find_elements_by_xpath(\"//a[@class='result']\")\n for filter in letter_filters:\n filter_text = filter.text.strip()\n if filter_text != 'ALL':\n filter.click()\n time.sleep(.05)\n results += get_current_list_of_professors(driver)\n results = set(results)\n return num_professors, results", "def get_property_listings_per_college(self, driver):\n # accept page terms & conditions\n sleep(1)\n button = driver.find_element_by_xpath('//*[@id=\"MainContent_btnAgere\"]').click()\n sleep(10)\n\n property_links = []\n\n # process first page\n property_links += self.get_page_property_listings(driver)\n # loop through remaining pages\n next_pages = self.get_next_pages(driver)\n num_pages = len(next_pages)//2\n current_page = 0\n while current_page < num_pages:\n page_button = next_pages[current_page].click()\n sleep(10)\n property_links += self.get_page_property_listings(driver)\n next_pages = self.get_next_pages(driver)\n current_page += 1\n\n return property_links", "def test_get_professor_list(self):\n url = reverse('institute-professors-list', args=['IC'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def schools():\n return (\n db_session.query(School)\n .filter(School.building_code.in_(school_codes))\n )", "def parseSchool(l):\n school = l[0].strip('\\xca')\n division = l[3][:4] + ' ' + l[3][6:8]\n return [school, division, parseScholarship(l[6])]", "def build_corpus(start_idx, num_schools_to_process):\n current_corpus = get_current_corpus()\n school2info = pickle.load(open('../1.rate_my_prof/school2info.pkl', 'rb'))\n sorted_schools = sorted(list(school2info.keys()))\n print('Total num schools:', len(sorted_schools))\n end_idx = min(len(sorted_schools), start_idx + num_schools_to_process)\n print('Processing schools from idx {} to {} ({} 
schools)'.format(start_idx, end_idx-1, end_idx-start_idx))\n total_num_new_reviews = 0\n for i in range(start_idx, end_idx):\n school = sorted_schools[i]\n sid, num_profs, prof_pages = school2info[school]\n if len(prof_pages) == 0:\n print('{}. {} -> no data on CS professors'.format(i, school))\n else:\n school_num_new_reviews = 0\n for prof_name, prof_url in prof_pages:\n fn = make_filename(prof_name, prof_url)\n if fn not in current_corpus:\n try:\n num_reviews, processed_reviews = parse_professor_page(prof_url)\n if len(processed_reviews) > 0:\n gender = predict_gender_from_reviews(processed_reviews)\n write_reviews_to_file(fn, prof_name, school, prof_url, num_reviews, gender, processed_reviews)\n school_num_new_reviews += len(processed_reviews)\n total_num_new_reviews += len(processed_reviews)\n except:\n print('Warning: failed on Prof. {} (id:{})'.format(prof_name, extract_prof_id(prof_url)))\n print('{}. {} -> num prof pages = {}, num new reviews = {}'.format(i, school, len(prof_pages), school_num_new_reviews))\n print('\\nFINISHED!')\n new_corpus = get_current_corpus()\n print('Num profs before: {}. Num profs now: {}.'.format(len(current_corpus), len(new_corpus)))", "def get_courses(bs, doc_ref):\n courses = bs.find(id=\"ACE_$ICField$4$$0\").tr.find_next_siblings('tr')\n for course in courses:\n title = course.find('a', {'class': 'PSHYPERLINK PTCOLLAPSE_ARROW'}).parent\n sections = course.find_all('table', {'class': 'PSLEVEL1GRIDNBONBO'})\n for section in sections:\n section = section.find('tr').find_next_sibling('tr')\n tds = section.find_all('td')\n\n doc_ref.collection('courses').document(title.get_text().strip().split('-')[0]) \\\n .collection('sections').document(tds[0].get_text().strip()).set({\n 'section': tds[1].get_text().split()[0].split('-')[1].strip(),\n 'time': tds[2].get_text().strip(),\n 'Instructor': tds[4].get_text().strip(),\n 'Status': tds[6].img['alt']\n }\n )", "def scraper(page):\n\n # Initialize empty lists\n titles = []\n urls = []\n techs = []\n instructors = []\n\n # Start scraper and get course blocks\n soup = BeautifulSoup(page, 'html')\n div = soup.findAll(\"div\", { \"class\": \"course-block\"})\n\n # Loop over all courses\n for element in div:\n a = element.find(\"a\", { \"class\": \"course-block__link\"})\n\n # Get url\n url = 'https://www.datacamp.com' + a.get('href')\n\n # Get tech\n if a.contents[1].get(\"class\")[1] == 'course-block__technology--r':\n tech = 'R'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--python':\n tech = 'Python'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--sql':\n tech = 'SQL'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--git':\n tech = 'Git'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--shell':\n tech = 'Shell'\n\n # Get title\n title = [element.get_text() for element in a.select(\"h4\")][0]\n\n # Get instructor\n instructor_div = element.find(\"div\", { \"class\": \"course-block__author-body\"})\n instructor = [element.get_text() for element in instructor_div.select(\"p\")][0]\n\n # Write information in lists\n titles.append(title)\n urls.append(url)\n techs.append(tech)\n instructors.append(instructor)\n\n # Write ordered dictionary and return it\n courses = OrderedDict({'Course': titles,\n 'URL': urls,\n 'Tech': techs,\n 'Instructor': instructors})\n\n return courses", "def fetch_student_records(self) -> List[str]:\n return [self.cwid, self.name, self.major, sorted(self.courses.keys())]", "def populate_database():\n 
database_schools = []\n n = 0\n cols = []\n cols_with_size = get_sizes()\n while n < len(colleges_with_sat):\n c = C(colleges_with_sat[n], colleges_with_sat[n+1], colleges_with_sat[n+2], colleges_with_sat[n+3], colleges_with_sat[n+4])\n cols.append(c)\n n+=5\n \n for i in range(0, len(colleges)):\n name = colleges[i]\n if False: #db_college_exists(name):\n continue\n sats = {}\n size = 0\n tuition = 0\n address = \"\"\n zipcode = 0\n matched = False\n for c in cols:\n if levenshtein(c.name, name) < 3:\n matched = True\n sats['math'] = c.math_range\n sats['reading'] = c.read_range\n if not matched:\n sats = None\n for c in cols_with_size:\n #print c[0]\n if levenshtein(c[0], name) < 3:\n size = c[1]\n tuition = c[2]\n address = c[3]\n zipcode = c[4]\n #print c\n break\n college = College(name, \"\", i, sats, size, tuition, address, zipcode)\n #print college\n database_schools.append(college)\n #college.print_college()\n user = User()\n user.name = \"Aaron\"\n user.sats = {\"math\" : 800, \"reading\" : 800}\n\n #print college.find_location()\n #print college.get_difficulty()\n return database_schools", "def mine_utsg_courses():\n st_george_api_url = \"https://timetable.iit.artsci.utoronto.ca/api/20209/courses?org=\"\n\n course_data = {}\n\n for subject in tqdm(st_george_subjects, desc=\"UTSG\"):\n request_url = st_george_api_url + subject\n results = json.loads(requests.get(request_url).text)\n\n for key in results:\n\n course_code = results[key]['code']\n\n if course_code in course_data:\n continue\n\n course_title = results[key]['courseTitle']\n course_description = BeautifulSoup(results[key]['courseDescription'], 'html5lib').text.strip()\n exclusions = results[key]['exclusion']\n prerequisites = results[key]['prerequisite']\n corequisites = results[key]['corequisite']\n\n course_data[course_code] = {\"Title\": course_title,\n \"Description\": course_description,\n \"Exclusions\": exclusions,\n \"Prerequisites\": prerequisites,\n \"Corequisites\": corequisites}\n\n with open('./data/utsg_courses.pickle', 'wb') as handle:\n pickle.dump(course_data, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def search_courses(session):\n page = session.get(URL)\n bs = BeautifulSoup(page.text, 'lxml')\n colleges = get_college(bs)\n for college in colleges:\n terms = get_term(session, bs, college)\n for term in terms[1:]:\n majors = get_majors(session, bs, college, term)\n for major in majors:\n for career in CAREER:\n doc_ref = db.collection('colleges').document(college) \\\n .collection('majors').document(major) \\\n .collection('terms').document(term) \\\n .collection('career').document(career)\n\n values = get_param_for_courses(bs, college, term, career, major)\n page = session.post(URL, data=values, headers=headers)\n bs1 = BeautifulSoup(page.text, 'lxml')\n try:\n get_courses(bs1, doc_ref)\n except AttributeError as ex:\n print('No course found')\n time.sleep(randint(0, 1))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Edits school2info.pkl to collect more professor pages for schools with more than 20 CS professors.
def edit_professors_per_school(): driver = prep_query_by_school_driver() fn = '../1.rate_my_prof/school2info.pkl' school2info = pickle.load(open(fn, 'rb')) missing_before = 0 missing_now = 0 for school, (sid, num_profs, prof_pages) in school2info.items(): if len(prof_pages) < num_profs: missing_before += num_profs - len(prof_pages) try: num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=False) print('{} -> got {} out of {}'.format(school, len(prof_pages), num_profs)) missing_now += num_profs - len(prof_pages) school2info[school] = (sid, num_profs, prof_pages) except: print('Failed parsing {} -> no change'.format(school)) missing_now += num_profs - len(prof_pages) # still missing same amount print('Missing {} profs before, missing {} profs now'.format(missing_before, missing_now)) pickle.dump(school2info, open(fn, 'wb'))
[ "def collect_professors_per_school(only_take_top_20):\n school2id = pickle.load(open('../rate_my_prof/school2id.pkl', 'rb'))\n sorted_schools = sorted(list(school2id.keys()))\n print(len(sorted_schools))\n school2info = {}\n driver = prep_query_by_school_driver()\n total_num_profs = 0\n total_num_prof_pages = 0\n for i, school in enumerate(sorted_schools):\n try:\n sid = school2id[school]\n num_profs, prof_pages = get_professors_from_school(driver, sid, only_take_top_20=only_take_top_20)\n total_num_profs += num_profs\n total_num_prof_pages += len(prof_pages)\n school = school.strip()\n school2info[school] = (sid, num_profs, prof_pages)\n pickle.dump(school2info, open('../rate_my_prof/school2info.pkl', 'wb'))\n print('{}. School: {}. Num CS profs: {} -> SUCCESS'.format(i, school, num_profs, len(prof_pages)))\n except Exception as e:\n print('{}. School: {} -> FAILED'.format(i, school), e)\n driver.quit()\n print('Processed {} schools'.format(len(school2info)))\n print('{} CS profs in total'.format(total_num_profs))\n print('{} prof pages collected'.format(total_num_prof_pages))", "def collect_schools():\n MIN_OFFSET = 0\n MAX_OFFSET = 6700\n STEP_SIZE = 20\n school2id = {}\n num_failed = 0\n for offset in np.arange(MIN_OFFSET, MAX_OFFSET+STEP_SIZE, step=STEP_SIZE):\n if offset % 100 == 0: print(offset)\n url = DOMAIN + '/search.jsp?query=&queryoption=HEADER&stateselect=&country=united+states&dept=&queryBy=schoolName&facetSearch=&schoolName=&offset={}&max=20'.format(offset)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n schools = soup.find_all('li', attrs={'class':'listing SCHOOL'})\n for s in schools:\n try:\n link = s.find('a')\n school_id = int(link['href'].split('=')[-1])\n name = link.find('span', attrs={'class':'listing-name'}).find('span', attrs={'class':'main'}).text\n school2id[name] = school_id\n except:\n print('Failed:', s.text.strip())\n num_failed += 1\n print('Num schools found:', len(school2id))\n for s in school2id:\n if 'Columbia' in s:\n print(s, school2id[s])\n pickle.dump(school2id, open('../rate_my_prof/school2id.pkl', 'wb'))", "def get_professors_from_school(driver, school_id, only_take_top_20 = False):\n url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(school_id)\n driver.get(url)\n num_professors = int(driver.find_element_by_xpath(\"//span[@class='professor-count']\").text)\n if num_professors == 0:\n return num_professors, []\n if only_take_top_20 or num_professors < 20:\n return num_professors, get_current_list_of_professors(driver)\n results = []\n letter_filters = driver.find_elements_by_xpath(\"//a[@class='result']\")\n for filter in letter_filters:\n filter_text = filter.text.strip()\n if filter_text != 'ALL':\n filter.click()\n time.sleep(.05)\n results += get_current_list_of_professors(driver)\n results = set(results)\n return num_professors, results", "def educations(self):\n schools = []\n if len(self.xp_educations) > 0:\n for school in self.xp_educations:\n data = {}\n data['university_name'] = extract_one(self.get_xp(school,\n './/h4[@class=\"item-title\"]//text()'))\n data['linkedin_university_url'] = extract_one(self.get_xp(school,\n './/h4[@class=\"item-title\"]/a/@href'))\n data['linkedin_university_img_url'] = extract_one(self.get_xp(school,\n './/h5[@class=\"logo\"]/a/img/@src'))\n data['description'] = extract_one(self.get_xp(\n school, './/h5[@class=\"item-subtitle\"]//text()'))\n if data['description'] is not None:\n data['degree'] = 
get_list_i(data['description'].split(','), 0)\n data['major'] = get_list_i(data['description'].split(','), 1)\n else:\n data['degree'] = None\n data['major'] = None\n start_date = self.get_xp(\n school, './/span[@class=\"date-range\"]/time[1]/text()')\n end_date = self.get_xp(\n school, './/span[@class=\"date-range\"]/time[2]/text()')\n data['start_date'] = extract_one(start_date)\n if end_date:\n data['end_date'] = extract_one(end_date)\n else:\n data['end_date'] = None\n schools.append(data)\n if not schools and self.code_data:\n code_educations = self.code_data[\n 'com.linkedin.voyager.identity.profile.Education'].values()\n for education in code_educations:\n data = {}\n data['university_name'] = education.get('schoolName')\n data['description'] = education.get('description')\n data['degree'] = education.get('degreeName')\n data['major'] = education.get('fieldOfStudy')\n data.update(self.get_dates_from_time_period(education))\n schools.append(data)\n today = time.strftime('%Y-%m-%d')\n schools.sort(key=lambda x: (x.get('end_date', today),\n x.get('start_date', '0')),\n reverse=True)\n return schools", "def build_corpus(start_idx, num_schools_to_process):\n current_corpus = get_current_corpus()\n school2info = pickle.load(open('../1.rate_my_prof/school2info.pkl', 'rb'))\n sorted_schools = sorted(list(school2info.keys()))\n print('Total num schools:', len(sorted_schools))\n end_idx = min(len(sorted_schools), start_idx + num_schools_to_process)\n print('Processing schools from idx {} to {} ({} schools)'.format(start_idx, end_idx-1, end_idx-start_idx))\n total_num_new_reviews = 0\n for i in range(start_idx, end_idx):\n school = sorted_schools[i]\n sid, num_profs, prof_pages = school2info[school]\n if len(prof_pages) == 0:\n print('{}. {} -> no data on CS professors'.format(i, school))\n else:\n school_num_new_reviews = 0\n for prof_name, prof_url in prof_pages:\n fn = make_filename(prof_name, prof_url)\n if fn not in current_corpus:\n try:\n num_reviews, processed_reviews = parse_professor_page(prof_url)\n if len(processed_reviews) > 0:\n gender = predict_gender_from_reviews(processed_reviews)\n write_reviews_to_file(fn, prof_name, school, prof_url, num_reviews, gender, processed_reviews)\n school_num_new_reviews += len(processed_reviews)\n total_num_new_reviews += len(processed_reviews)\n except:\n print('Warning: failed on Prof. {} (id:{})'.format(prof_name, extract_prof_id(prof_url)))\n print('{}. {} -> num prof pages = {}, num new reviews = {}'.format(i, school, len(prof_pages), school_num_new_reviews))\n print('\\nFINISHED!')\n new_corpus = get_current_corpus()\n print('Num profs before: {}. 
Num profs now: {}.'.format(len(current_corpus), len(new_corpus)))", "def show_school_details():\n \n zipcode = request.args.get(\"zipcode\")\n \n if zipcode is None:\n return render_template(\"index_map.html\")\n \n py_zipcode = find_zipcode_from_input(zipcode)\n school = School.query.filter_by(neighborhood_id=zipcode).first()\n if school is None:\n schoolObjects = get_schools(py_zipcode)\n for schoolObject in schoolObjects:\n schoolObject.neighborhood_id = zipcode\n db.session.add(schoolObject)\n db.session.commit()\n else:\n schoolObjects = School.query.filter_by(neighborhood_id=zipcode).all()\n\n return render_template(\"schools_on_map.html\", schoolObjects=schoolObjects, zipcode=zipcode)", "def test_get_professor_list(self):\n url = reverse('institute-professors-list', args=['IC'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def college_transfer_scrape(schools):\n \n #links =\n\n #for i, link in enumerate(links):\n # soup = make_soup(link)\n \n # for item in soup.findAll():\n # stuff = ''\n \n # schools[i]['item'] = stuff\n \n return schools", "def results(request):\n\n prop_data = request.session.get('prop')\n schools = GreatSchools(\n prop_data['address'], prop_data['city'], prop_data['state'], prop_data['zip_code'], prop_data['county'])\n schools.set_greatschool_urls()\n if schools.api_key and schools.DAILY_API_CALL_COUNT <= 2950:\n for url in schools.urls:\n schools.get_greatschool_xml(url)\n\n else:\n schools.elem_school = 'Unknown'\n schools.mid_school = 'Unknown'\n schools.high_school = 'Unknown'\n prop = PropSetup(prop_data['address'])\n for key in prop_data.keys():\n prop.__dict__[key] = prop_data[key]\n\n context = {\n 'address': prop.address,\n 'taxes': '$' + str(int(int(prop.taxes) / 12)),\n 'hoa': '$' + str(int(int(prop.hoa) / 12)),\n 'rent': '$' + str(prop.rent),\n 'vacancy': '$' + str(prop.vacancy_calc),\n 'oper_income': '$' + str(prop.oper_inc_calc),\n 'total_mortgage': '$' + str(prop.total_mortgage_calc),\n 'down_payment_percentage': str(prop.down_payment_percentage) + '%',\n 'down_payment': '$' + str(prop.down_payment_calc),\n 'curr_value': '$' + str(prop.curr_value),\n 'init_cash_invest': '$' + str(prop.init_cash_invested_calc),\n 'oper_exp': '$' + str(prop.oper_exp_calc),\n 'net_oper_income': '$' + str(prop.net_oper_income_calc),\n 'cap_rate': '{0:.1f}%'.format(prop.cap_rate_calc * 100),\n 'initial_market_value': '$' + str(prop.curr_value),\n 'interest_rate': str(prop.interest_rate) + '%',\n 'mort_payment': '$' + str(prop.mort_payment_calc),\n 'sqft': prop.sqft,\n 'closing_costs': '$' + str(prop.closing_costs),\n 'initial_improvements': '$' + str(prop.initial_improvements),\n 'cost_per_sqft': '$' + str(prop.cost_per_sqft_calc),\n 'insurance': '$' + str(int(int(prop.insurance) / 12)),\n 'maintenance': '$' + str(int(int(prop.maint_calc) / 12)),\n 'prop_management_fee': '$' + str(prop.prop_management_fee),\n 'utilities': '$' + str(prop.utilities),\n 'tenant_placement_fee': '$' + str(int(int(prop.tenant_place_calc) / 12)),\n 'resign_fee': '$' + str(int(int(prop.resign_calc) / 12)),\n 'notes': prop.notes,\n 'pub_date': timezone.now,\n 'rtv': '{0:.2f}%'.format(prop.rtv_calc * 100),\n 'cash_flow': '$' + str(prop.cash_flow_calc),\n 'oper_exp_ratio': '{0:.1f}'.format(prop.oper_exp_ratio_calc * 100) + '%',\n 'debt_coverage_ratio': prop.debt_coverage_ratio_calc,\n 'cash_on_cash': '{0:.2f}%'.format(prop.cash_on_cash_calc * 100),\n 'elem_school': schools.elem_school,\n 'elem_school_score': schools.elem_school_score,\n 'mid_school': 
schools.mid_school,\n 'mid_school_score': schools.mid_school_score,\n 'high_school': schools.high_school,\n 'high_school_score': schools.high_school_score,\n 'year_built': prop.year_built,\n 'county': prop.county,\n 'nat_disasters': 'Unknown',\n 'listing_url': prop.listing_url,\n 'beds': prop.beds,\n 'baths': prop.baths,\n 'livability': prop.areavibes_dict['livability'],\n 'crime': prop.areavibes_dict['crime'],\n 'cost_of_living': prop.areavibes_dict['cost_of_living'],\n 'schools': prop.areavibes_dict['schools'],\n 'employment': prop.areavibes_dict['employment'],\n 'housing': prop.areavibes_dict['housing'],\n 'weather': prop.areavibes_dict['weather'],\n 'disaster1_type': prop.disaster_dict['1'][0],\n 'disaster1_date': prop.disaster_dict['1'][1],\n 'disaster1_county': prop.disaster_dict['1'][2],\n 'disaster1_url': prop.disaster_dict['1'][4],\n 'disaster1_title': prop.disaster_dict['1'][5],\n 'disaster2_type': prop.disaster_dict['2'][0],\n 'disaster2_date': prop.disaster_dict['2'][1],\n 'disaster2_county': prop.disaster_dict['2'][2],\n 'disaster2_url': prop.disaster_dict['2'][4],\n 'disaster2_title': prop.disaster_dict['2'][5],\n 'disaster3_type': prop.disaster_dict['3'][0],\n 'disaster3_date': prop.disaster_dict['3'][1],\n 'disaster3_county': prop.disaster_dict['3'][2],\n 'disaster3_url': prop.disaster_dict['3'][4],\n 'disaster3_title': prop.disaster_dict['3'][5],\n 'disaster4_type': prop.disaster_dict['4'][0],\n 'disaster4_date': prop.disaster_dict['4'][1],\n 'disaster4_county': prop.disaster_dict['4'][2],\n 'disaster4_url': prop.disaster_dict['4'][4],\n 'disaster4_title': prop.disaster_dict['4'][5],\n 'disaster5_type': prop.disaster_dict['5'][0],\n 'disaster5_date': prop.disaster_dict['5'][1],\n 'disaster5_county': prop.disaster_dict['5'][2],\n 'disaster5_url': prop.disaster_dict['5'][4],\n 'disaster5_title': prop.disaster_dict['5'][5],\n }\n request.session['PROP'] = prop.__dict__\n return render(request, 'app/results.html', context)", "def prep_query_by_school_driver():\n driver = webdriver.Chrome(os.path.join(os.getcwd(), 'chromedriver'))\n columbia_url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(COLUMBIA_ID)\n driver.get(columbia_url)\n driver.find_element_by_class_name('close-this').click()\n dept_input = driver.find_element_by_xpath(\"//input[@placeholder='Enter Your Department']\")\n dept_input.send_keys('Computer Science')\n cs_option = driver.find_element_by_xpath(\"//li[@data-value='Computer Science']\")\n cs_option.click()\n return driver", "def populate_database():\n database_schools = []\n n = 0\n cols = []\n cols_with_size = get_sizes()\n while n < len(colleges_with_sat):\n c = C(colleges_with_sat[n], colleges_with_sat[n+1], colleges_with_sat[n+2], colleges_with_sat[n+3], colleges_with_sat[n+4])\n cols.append(c)\n n+=5\n \n for i in range(0, len(colleges)):\n name = colleges[i]\n if False: #db_college_exists(name):\n continue\n sats = {}\n size = 0\n tuition = 0\n address = \"\"\n zipcode = 0\n matched = False\n for c in cols:\n if levenshtein(c.name, name) < 3:\n matched = True\n sats['math'] = c.math_range\n sats['reading'] = c.read_range\n if not matched:\n sats = None\n for c in cols_with_size:\n #print c[0]\n if levenshtein(c[0], name) < 3:\n size = c[1]\n tuition = c[2]\n address = c[3]\n zipcode = c[4]\n #print c\n break\n college = College(name, \"\", i, sats, size, tuition, address, zipcode)\n #print college\n database_schools.append(college)\n #college.print_college()\n user = User()\n user.name = 
\"Aaron\"\n user.sats = {\"math\" : 800, \"reading\" : 800}\n\n #print college.find_location()\n #print college.get_difficulty()\n return database_schools", "def college_data_scrape(schools, links):\n \n #links =\n\n #for i, link in enumerate(links):\n # soup = make_soup(link)\n \n # for item in soup.findAll():\n # stuff = ''\n \n # schools[i]['item'] = stuff\n \n return schools", "def setup_school(self):\n\n self.school = School()\n # setting up the initial faculty\n random.seed()\n num_teachers = random.randint(0, 12)\n print('num_teachers:', num_teachers) #comment\n # print('No Teachers'\n for i in range(num_teachers):\n t = Teacher();\n t.hire()\n self.school.teachers = list_teachers\n\n #if no teachers have arrived, there are no students\n if len(list_teachers) < 1:\n self.school.students = []\n else:\n av_teachers = self.school.gen_grade_teacher_dict()\n student_enrollment_number = random.randint(82, 100)\n print(student_enrollment_number)\n for i in range(student_enrollment_number): #largest HS in USA has 8076 #!more in readme\n s = Student()\n s = s.enroll(av_teachers)\n try:\n list_students.append(s)\n remove_maxed_teacher(s, av_teachers)\n except Exception as e:\n break;\n print('Enrollment completed')\n self.school.students = list_students\n # self.grade_levels = self.gen_grade_levels()\n # new_student_testcase_full_full(av_teachers)\n print('Current size of faculty is: ', len(self.school.teachers))\n print('Hiring new teacher with no grade assigned:....')\n t = Teacher()\n t.hire()\n self.school.teachers.append(t)\n print('Teacher: ', t.name, ', hired for Grade: ', t.grade_level)\n print('New size of faculty is: ', len(self.school.teachers))\n print('Current size of faculty is: ', len(self.school.teachers))\n print('Hiring a new teacher to work in Grade 12:.....')\n grade_level = '12'\n t_for_12= Teacher()\n t_for_12.hire(grade_level)\n print('Teacher: ', t_for_12.name, ', hired for Grade: ', t_for_12.grade_level)\n print('New size of faculty is: ', len(self.school.teachers))\n self.school.teachers.append(t)\n self.output_startup()", "def test_get_professor_detail(self):\n url = reverse('institute-professors-detail',\n args=['IC', 'Pedro Rezende'])\n BaseAPITest.check_user_permissions(self, None, 'get',\n status.HTTP_200_OK, url)", "def expert_profile(request, expert_id=None):\n try:\n user_expert = True\n person = get_person(request)\n # if expert_id person accessing the view is not expert, is supervisor\n if expert_id:\n user_expert = False\n expert = Person.objects.get(id=expert_id)\n if not supervisor_can_access_expert_profile(person, expert):\n return render_to_response(\"crppdmt/error.html\", {\"error_description\": \"Permission denied.\",},\n context_instance=RequestContext(request))\n else:\n expert = person\n # get personal documents list\n personal_docs = PersonalDocument.objects.filter(expert = expert)\n # get deployment list\n expert_request_list = ExpertRequest.objects.filter(expert=expert)\n # pagination stuff\n paginator = Paginator(expert_request_list, ITEMS_PER_PAGE) # Limit items per page\n page = request.GET.get('page')\n try:\n requests_paginated = paginator.page(page)\n except:\n print(\"Unexpected error:\", sys.exc_info())\n requests_paginated = paginator.page(1)\n\n template = loader.get_template('crppdmt/expert/expert_profile.html')\n context = RequestContext(request, {\n 'request_list': requests_paginated,\n 'username': person.user.username,\n 'user': person.user,\n 'person': person,\n 'personal_docs': personal_docs,\n })\n return 
HttpResponse(template.render(context))\n except:\n if debug_is_on():\n raise\n else:\n return render_to_response(\"crppdmt/error.html\",\n {\"error_description\": str(sys.exc_traceback),},\n context_instance=RequestContext(request))", "def update_profile(request):\n for reason in SEEKING_HELP_REASONS:\n if not SeekingHelpReason.objects.filter(reason=reason):\n r = SeekingHelpReason(reason=reason)\n r.save()\n if request.method == 'GET':\n if User.objects.filter(username=request.user, student__isnull=False):\n context = {\n 'user_form': UserForm(instance=request.user),\n 'student_form': StudentForm(instance=request.user.student)}\n return render(request, 'caps/StudentUpdateProfile.html', context)\n context = {\n 'errors': ['Only a student can edit his profile.\\\n\t\t\t\tPlease use CaPS Administration to edit your profile.'],\n 'user': request.user}\n return render(request, 'caps/Home.html', context)\n\n # POST request\n user_form = UserForm(request.POST, instance=request.user)\n if User.objects.filter(username=request.user, student__isnull=False):\n student_form = StudentForm(\n request.POST, request.FILES, instance=request.user.student)\n\n if user_form.is_valid() and student_form.is_valid():\n user_form.save()\n student_form.save()\n return redirect(reverse('studentprofile'))\n else:\n context = {'user_form': user_form, 'student_form': student_form}\n return render(request, 'caps/StudentUpdateProfile.html', context)\n # if User.objects.filter(username=request.user, counselor__isnull=False):\n # counselor_form = CounselorForm(\n # request.POST, request.FILES, instance=request.user.counselor)\n # if user_form.is_valid() and counselor_form.is_valid():\n # user_form.save()\n # counselor_form.save()\n # return redirect(\n # reverse('counselorprofile', args=[request.user.username]))\n # else:\n # context = {\n # 'user_form': user_form, 'counselor_form': counselor_form}\n # render(request, 'caps/CounselorUpdateProfile.html', context)", "def page_flipper(BASE_URL):\n soup = make_soup(BASE_URL)\n \n schools = []\n rankings = []\n schoolRanks = []\n pageLimit = 4\n index = 1\n \n while index <= pageLimit:\n section_url = BASE_URL + str(index)\n schoolRanks = get_rankings(section_url, schools, rankings)\n index += 1\n \n return schoolRanks", "def school():\n return render_template(\n 'school.html',\n title='School',\n year=datetime.now().year,\n message='Your school page.'\n )", "def add_professor(self, p):\n self.professor = p" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the text corpus, where there is one text file per professor, and the text file consists of all of that professor's reviews.
def build_corpus(start_idx, num_schools_to_process): current_corpus = get_current_corpus() school2info = pickle.load(open('../1.rate_my_prof/school2info.pkl', 'rb')) sorted_schools = sorted(list(school2info.keys())) print('Total num schools:', len(sorted_schools)) end_idx = min(len(sorted_schools), start_idx + num_schools_to_process) print('Processing schools from idx {} to {} ({} schools)'.format(start_idx, end_idx-1, end_idx-start_idx)) total_num_new_reviews = 0 for i in range(start_idx, end_idx): school = sorted_schools[i] sid, num_profs, prof_pages = school2info[school] if len(prof_pages) == 0: print('{}. {} -> no data on CS professors'.format(i, school)) else: school_num_new_reviews = 0 for prof_name, prof_url in prof_pages: fn = make_filename(prof_name, prof_url) if fn not in current_corpus: try: num_reviews, processed_reviews = parse_professor_page(prof_url) if len(processed_reviews) > 0: gender = predict_gender_from_reviews(processed_reviews) write_reviews_to_file(fn, prof_name, school, prof_url, num_reviews, gender, processed_reviews) school_num_new_reviews += len(processed_reviews) total_num_new_reviews += len(processed_reviews) except: print('Warning: failed on Prof. {} (id:{})'.format(prof_name, extract_prof_id(prof_url))) print('{}. {} -> num prof pages = {}, num new reviews = {}'.format(i, school, len(prof_pages), school_num_new_reviews)) print('\nFINISHED!') new_corpus = get_current_corpus() print('Num profs before: {}. Num profs now: {}.'.format(len(current_corpus), len(new_corpus)))
[ "def create_reuters_corpus():\n #adapted from\n #https://medium.com/@namanjain2050/\n #finding-similar-documents-reuters-dataset-example-part-4-eb0462e1ab2b\n documents = []\n corpus_filename = config.CORPUS[config.REUTERS]['corpusxml']\n if os.path.exists(corpus_filename) is True:\n if os.path.getsize(corpus_filename) > 0:\n print(\"Reuters corpus already exists\")\n return\n path = '/home/tjm/Documents/Winter2020/CSI4107/reuters21578'\n for filename in glob.glob(os.path.join(path, '*.sgm')):\n with open(filename, 'rb') as f:\n data = f.read()\n soup = bs4.BeautifulSoup(data, 'html.parser')\n docs = soup.findAll(\"reuters\")\n# contents = soup.findAll('body')\n for doc in docs:\n doc_attrs = doc.attrs\n title = \"\"\n body = \"\"\n topics = \"\"\n if doc.find(\"body\"):\n body = doc.find(\"body\").text.replace('\\n', ' ').replace('\\r', '')\n body = body.replace('\\x03', '').replace('\u001b[B', '').replace('\u0005\u001e', '')\n if doc.find(\"title\"):\n title = doc.find(\"title\").text\n if doc.find(\"topics\"):\n for topic in doc.find(\"topics\"):\n topics += topic.text + ' '\n documents.append([doc_attrs['newid'], title,\n body, topics])\n# for content in contents:\n# documents.append(content.text)\n\n xml_writer(documents, corpus_filename)", "def build_corpus(self):\n logging.info('Start')\n\n make_folder(self.file_path)\n self.gen_info_file()\n\n for term in self.search_terms:\n term_path = os.path.join(self.file_path, term)\n make_folder(term_path)\n logging.info(\"searching for %s\" % term)\n\n for year in self.dates_range:\n logging.error(\n \"Start retrieving %s in year %d\" % (term, year))\n data_path = os.path.join(term_path, str(year) + '.pickle')\n data = self.retrieve_all_in_year(term, year)\n if len(data) is not 0:\n with open(data_path, 'wb') as f:\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n\n logging.info('End')", "def build_corpus(self):\n self.corpus = [self.dict.doc2bow(text) for text in self.all_text]\n gensim.corpora.MmCorpus.serialize(self.temp_path + 'corpus.mm', self.corpus)", "def create_corpus():\n\n num_queries = input(\"\\nNumber of queries: \")\n\n global NUM_DOCS_DOWNLOAD\n NUM_DOCS_DOWNLOAD = input(\"Number of docs for each query: \")\n\n global TOTAL_DOCS\n TOTAL_DOCS = num_queries * NUM_DOCS_DOWNLOAD\n\n print \"Total docs = %d\" % TOTAL_DOCS\n\n for i in range(num_queries):\n query = raw_input(\"\\nQuery: \")\n index_terms = preprocess_text(query)\n search_query = ' '.join(index_terms)\n\n # Download documents\n download_documents(search_query)\n\n # Generate text files from .doc files\n doc_to_text()", "def preprocess():\n\n # Load Data from json-file to list\n raw_data = []\n with open(ds_path) as f:\n for line in f:\n raw_data.append(json.loads(line))\n print(len(raw_data))\n\n # convert data from list to pandas dataframe\n df = pd.DataFrame(raw_data)\n\n # filter all review texts that have more then 30 characters\n df = df[df[\"reviewText\"].str.len() >= 30]\n\n # convert overall rating to sentiment\n df.insert(3, \"sentiment\", df[\"overall\"].replace({5.0: 1, 4.0: 1, 3.0: 0, 2.0: -1, 1.0: -1}), allow_duplicates=True)\n\n # compute minimum number of occurences of all sentiments\n sent_count_min = df[\"sentiment\"].value_counts().min()\n df = df.groupby(\"sentiment\").head(sent_count_min)\n\n # shuffle data (random_state for reproducibility)\n df = df.sample(frac=1, random_state=1).reset_index(drop=True)\n\n print(\"Total reviews: {}\".format(len(df)))\n print(df[\"overall\"].value_counts())\n\n df.head()\n\n print(\"Creating .txt file that 
contains {} reviews: {}\".format(rev_texts_path, len(df)))\n with open(\"../data/processed/gourmet.txt\", \"w\") as f:\n for i, row in df.iterrows():\n f.write(\"###{}\\n\".format(row[\"overall\"]))\n f.write(row[\"reviewText\"] + \"\\n\\n\")\n\n print(\"Creating {} documents that contains {} reviews each: {}\".format(nb_files, int(len(df) / nb_files),\n rev_texts_path))\n\n reviews_per_file = int(len(df) / nb_files)\n file_counter = 0\n reviews = \"\"\n review_counter = 0\n\n for i, row in df.iterrows():\n\n reviews += \"###{}\\n{}\\n\\n\".format(row[\"overall\"], row[\"reviewText\"])\n review_counter += 1\n\n if review_counter == reviews_per_file:\n with open(rev_texts_path + str(file_counter + 1) + \".txt\", \"w\") as f:\n f.write(reviews)\n\n reviews = \"\"\n file_counter += 1\n review_counter = 0\n\n with open(rev_texts_path + str(file_counter) + \".txt\", \"a\") as f:\n f.write(reviews)", "def extract_corpus(corpus_path, number_files, number_sentences, verbose):\n file_list = os.listdir(corpus_path)\n file_list = [file for file in file_list if file[0] != '.' for file in file_list]\n\n if number_files > 0 and number_files < len(file_list):\n random.shuffle(file_list)\n file_list = file_list[:number_files]\n\n corpus_doc = []\n for file in file_list:\n if verbose: print(file)\n corpus_doc.extend(extract_file(corpus_path+'/'+file))\n \n if number_sentences < len(corpus_doc) and number_sentences > 0:\n random.shuffle(corpus_doc)\n corpus_doc = corpus_doc[:number_sentences]\n\n if verbose:\n print(\"First 3 sentences: \")\n print(corpus_doc[:3])\n print(\"Number of sentences: \"+ str(len(corpus_doc)))\n \n return corpus_doc", "def make_corpus(in_f, out_f):\n wiki = WikiCorpus(in_f)\n with open(out_f, 'w') as output:\n i = 0\n for text in wiki.get_texts():\n output.write('\\n'.join(text) + '\\n')\n i += 1\n if not i % 10000:\n print(f'Processed {i} articles', end='\\r')\n print('\\nProcessing complete!')", "def generate_sentence_retrieval_training_set(path_to_infile, outfile, path_wiki_titles):\n\toutfile = open(outfile, \"w\")\n\tdocs = load_wiki_docs(path_to_infile, path_wiki_titles)\n\n\t\n\twith open(path_to_infile) as infile:\n\t\tfor line in infile:\n\t\t\tdata = json.loads(line)\n\t\t\tclaim = data[\"claim\"]\n\t\t\tlabel = data[\"label\"]\n\t\t\tevidence = data[\"evidence\"]\n\t\t\tpred_pages = data[\"predicted_pages\"]\n\n\t\t\t# if not verifiable, we don't have evidence and just continue\n\t\t\tif data[\"verifiable\"] == \"NOT VERIFIABLE\":\n\t\t\t\tcontinue\n\n\t\t\tpositive_examples = set()\n\t\t\tnegative_examples = set()\n\t\t\tgood_docs = set()\n\n\t\t\tfor evid in evidence:\n\t\t\t\tfor i,item in enumerate(evid):\n\t\t\t\t\tAnnotation_ID, Evidence_ID, Wikipedia_URL, sentence_ID = item\n\t\t\t\t\tWikipedia_URL = unicodedata.normalize(\"NFC\", Wikipedia_URL)\n\n\t\t\t\t\t# add positive example (only the first evidence)\n\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\tpositive_examples.add((claim, Wikipedia_URL, sentence_ID, 0))\n\n\t\t\t\t\t\t# sample negative evidence:\n\t\t\t\t\t\tneg = sample_negative_example(Wikipedia_URL, docs)\n\t\t\t\t\t\tif neg != -1:\n\t\t\t\t\t\t\t#negative_examples.add((claim, neg[0], neg[1], 2))\n\t\t\t\t\t\t\tfor n in neg:\n\t\t\t\t\t\t\t\tnegative_examples.add((claim, n[0], n[1], 2))\n\t\t\t\t\t\tgood_docs.add(Wikipedia_URL)\n\t\t\t\t\t\n\t\t\t\t\t# otherwise we just want to add the document so that we don't sample negative examples from a \"good\" document\n\t\t\t\t\telse:\n\t\t\t\t\t\tgood_docs.add(Wikipedia_URL)\n\n\n\n\t\t\t# sample negative examples 
from other predicted pages which are not in good evidence\n\t\t\tfor page in pred_pages:\n\t\t\t\tif page in docs:\n\t\t\t\t\tif page not in good_docs:\n\t\t\t\t\t\tneg = sample_negative_example(page, docs)\n\n\t\t\t\t\t\tif neg != -1:\n\t\t\t\t\t\t\t#negative_examples.add((claim, neg[0], neg[1], 2))\n\t\t\t\t\t\t\t# only add first three sentences (first few are most indicative given false positive wiki docs, especially the first sentence)\n\t\t\t\t\t\t\tfor n in neg[:3]:\n\t\t\t\t\t\t\t\tnegative_examples.add((claim, n[0], n[1], 2))\n\t\t\t# write positive and negative evidence to file\n\t\t\tfor ex in positive_examples:\n\t\t\t\tsent = docs[ex[1]][ex[2]].split(\"\\t\")[1]\n\t\t\t\toutfile.write(ex[0] + \"\\t\" + ex[1] + \"\\t\" + sent + \"\\t\" + str(ex[3]) + \"\\t\" + str(ex[2]) + \"\\t\" + label + \"\\n\")\n\t\t\tfor ex in negative_examples:\n\t\t\t\ttry:\n\t\t\t\t\tsent = docs[ex[1]][ex[2]].split(\"\\t\")[1]\n\t\t\t\t#\tprint (ex[1], ex[2], \"------\",ex[0], \"-------\", sent, \"------\", ex[3])\n\t\t\t\t\toutfile.write(ex[0] + \"\\t\" + ex[1] + \"\\t\" + sent + \"\\t\" + \"2\" + \"\\t\" + str(ex[2]) + \"\\t\" + label + \"\\n\")\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\toutfile.close()", "def create_corpus(self, fpath):\n\n # read all eea documents from csv file\n eeadocs = textacy.fileio.read.read_csv(fpath)\n\n # '/code/data_all.csv'\n # use title as \"text\" to analyse.\n # therefore split title (first column 0) from metadata\n content_stream, metadata_stream = textacy.fileio.split_record_fields(\n eeadocs, 0)\n\n # create textacy english Corpus\n corpus = textacy.Corpus('en', texts=content_stream,\n metadatas=metadata_stream)\n\n return corpus", "def load_reviews(self):\n\n self.reviews = defaultdict(dict)\n np.random.seed(7)\n # populate reviews dict\n for review_type in [\"positive\", \"negative\"]:\n for cat in self.categories:\n file_path = os.path.join(\n self._init_file_dir,\n \"../../..\",\n \"text_data_corpus/reviews/{}/{}.review\".format(cat, review_type),\n )\n reviews_raw = BeautifulSoup(\n open(file_path).read(), features=\"html.parser\"\n )\n self.reviews[review_type][cat] = [\n self.strip_non_printable(review.text)\n for review in reviews_raw.find_all(\"review_text\")\n ]\n\n # merge all categories into one\n self.reviews[review_type] = list(\n chain(*list(self.reviews[review_type].values()))\n )\n np.random.shuffle(self.reviews[review_type])\n\n # save tokenized reviews to cache to speedup build process\n with open(self.cached_path_reviews, \"w\") as fp:\n json.dump(self.reviews, fp)", "def main():\n\t\n\t# create argument parser\n\tparser = argparse.ArgumentParser(description=\"Pre-processor for reddit corpus dumps. Parses text and stores it in a DocumentDatabase inventory.\")\n\t\n\t# add arguments\n\tparser.add_argument(\"--documents_path\", help=\"The path to the documents directory.\", default=\"data/documents/noun_chunked\")\n\tparser.add_argument(\"--documents_version\", help=\"The version of the document database to save to. 
(Defaults to a new version.)\", type=int)\n\tparser.add_argument(\"--reddit_path\", help=\"The path to the reddit corpus archive.\", default=\"data/raw/reddit/reddit_corpus.gz\")\n\t\n\t# parse\n\targs = parser.parse_args()\n\t\n\t# resolve documents version\n\tdocuments_version = args.documents_version\n\tif not documents_version:\n\t\tdocuments_version = DocumentDatabase.get_latest_version(args.documents_path) + 1\n\t\t\n\t# print setup information\n\tprint \"\"\n\tprint \"OBER TEXT PREPROCESSOR (NOUN CHUNK - REDDIT CORPUS DUMP)\"\n\tprint \"\"\n\tprint \"REDDIT ARCHIVE:\\t\\t%s\" % args.reddit_path\n\tprint \"SAVING TO:\\t\\t%s [VERSION: %d]\" % (args.documents_path, documents_version)\n\tprint \"\"\n\tprint \"\"\n\t\n\t# load spacy\n\tprint \"LOADING SPACY NLP LIBRARY ...\"\n\tnlp = spacy.load(\"en\")\n\n\t# load the reddit reader\n\tprint \"LOADING TEXTACY REDDIT CORPUS READER ...\"\n\treader = RedditReader(args.reddit_path)\n\t\n\t# load the document database\n\tprint \"LOADING DOCUMENT DATABASE ...\"\n\tdocument_database = DocumentDatabase.load(args.documents_path, version=documents_version)\n\t\n\t# get iterator of documents\n\tdocuments = reader.records(min_len=200)\n\t\n\t# split documents into paragraphs (document id, document title, paragraph)\n\tparagraphs = split_into_paragraphs(documents)\n\t\n\t# split iterator into two\n\tparagraphs_1, paragraphs_2 = itertools.tee(paragraphs)\n\t\n\t# one keeps index and titles\n\tparagraphs_1 = ( (paragraph[0], paragraph[1]) for paragraph in paragraphs_1 )\n\t# the other just keeps text\n\tparagraphs_2 = ( paragraph[2] for paragraph in paragraphs_2 )\n\t\n\t# combine noun chunks for the texts\n\tparagraphs_2 = combine_noun_chunks(nlp, paragraphs_2)\n\t\n\t# zip paragraphs_1 and paragraphs_2 back together\n\tparagraphs = itertools.izip(paragraphs_1, paragraphs_2)\n\t\n\t# group by index\n\tdocuments = itertools.groupby(paragraphs, lambda x: x[0])\n\t\n\t# format into JSON objects\n\tdocuments = ( { \"title\": document[0][1], \"paragraphs\": [ paragraph[1] for paragraph in document[1] ] } for document in documents )\n\t\n\t# begin parsing\n\tprint \"\\nBEGINNING PARSE ...\"\n\tdocument_database.add_documents(documents)", "def _train_save_dictionary_corpus(filein, n_grams, target,\n training=True,\n feature_count=100000):\n print('Building dictionary...')\n if training:\n dictionary = corpora.Dictionary(_list_grams(filein, n_grams))\n print('Dictionary len before filter = ', len(dictionary))\n dictionary.filter_extremes(no_below=5, no_above=0.5,\n keep_n=feature_count)\n print('Dictionary len after filter = ', len(dictionary))\n dictionary.save(\n f'nlp_training_data/{target}_subset.dict')\n corpus = [dictionary.doc2bow(word) for word in _list_grams(\n filein, n_grams)]\n corpora.MmCorpus.serialize(\n f'nlp_training_data/{target}_subset_corpus.mm', corpus)\n print(f'saved nlp_training_data/{target}_subset_corpus.mm')\n else:\n dictionary = corpora.Dictionary(_list_grams(filein, n_grams))\n print('Dictionary len before filter = ', len(dictionary))\n dictionary.filter_extremes(no_below=5, no_above=0.5,\n keep_n=feature_count)\n print('Dictionary len after filter = ', len(dictionary))\n dictionary.save(f'nlp_training_data/{target}_full.dict')\n corpus = [dictionary.doc2bow(word) for word in _list_grams(\n filein, n_grams)]\n corpora.MmCorpus.serialize(\n f'nlp_training_data/{target}_full_corpus.mm', corpus)\n print('DONE!')\n return dictionary, corpus", "def preprocess(words_file=\"../tools/word_data.pkl\", 
authors_file=\"../tools/email_authors.pkl\"):\n # the words (features) and authors (labels), already largely preprocessed this preprocessing will be repeated in the text learning mini-project\n print('words_file = {}'.format(words_file))\n word_data = pickle.load(open(words_file, \"rb\"))\n authors = pickle.load(open(authors_file, \"rb\"))\n\n # test_size is the percentage of events assigned to the test set (remainder go into training)\n features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)\n\n # text vectorization--go from strings to lists of numbers\n vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')\n features_train_transformed = vectorizer.fit_transform(features_train)\n features_test_transformed = vectorizer.transform(features_test)\n\n # feature selection, because text is super high dimensional and can be really computationally chewy as a result\n selector = SelectPercentile(f_classif, percentile=10)\n selector.fit(features_train_transformed, labels_train)\n features_train_transformed = selector.transform(features_train_transformed).toarray()\n features_test_transformed = selector.transform(features_test_transformed).toarray()\n\n # info on the data\n print(\"no. of Chris training emails:\", sum(labels_train))\n print(\"no. of Sara training emails:\", len(labels_train) - sum(labels_train))\n\n return numpy.array(features_train_transformed), numpy.array(features_test_transformed), numpy.array(labels_train), numpy.array(labels_test)", "def validation_sentences():\r\n # load the test data\r\n data_gen = AudioGenerator(spectrogram=True)\r\n data_gen.load_validation_data()\r\n # obtain the true transcription and the audio features\r\n num = 500\r\n f = open('C:/Users/mribles/Desktop/corpus.txt', 'a')\r\n while num > 490:\r\n transcr = data_gen.valid_texts[num]\r\n f.write(transcr + '\\n')\r\n num = num -1\r\n f.close()", "def preprocess(self):\n for key in self.markdown.keys():\n # data goes to this file \n f = open(key + \".txt\", \"wb\")\n # clean the data up before writing to file\n largeString = \"\\n\".join(self.markdown[key])\n sentences = self.get_sentences(largeString)\n for sentence in sentences:\n x = self.remove_chars(sentence) \n y = self.tokenize_punc(x)\n # write data to file sentence by sentence\n f.write(y.lstrip() + '\\n')\n f.close()", "def preProcess():\n global df\n \n #Read files from the corpus directory in read mode\n for filename in os.listdir(corpusroot):\n file = open(os.path.join(corpusroot, filename), \"r\", encoding='UTF-8')\n doc = file.read()\n file.close()\n doc = doc.lower()\n\n # tokenizing all the words from the document\n tokens = tokenizer.tokenize(doc)\n\n # stopwords remove and stemming\n # case 1 time = 3.834928661815138\n temp = []\n append = temp.append\n for token in tokens:\n if token not in stop_words: \n append(token)\n\n #Using map to map stemmer function to all temp list elemets at once and Typecating to list again\n tokens = list(map(stemmer, temp)) \n\n # case 2 time = 6.202010461137888\n # tokens = list(map(lambda x: stemmer(x), filter(lambda x: x not in stop_words, tokens)))\n\n # Counting term frequency and storing in tf dict. 
\n # Counter is inbuild function that Counts the element occurance in a list\n tf[filename] = Counter(tokens);\n \n # counting document frequency\n # converting tokens to set to remove duplicates which avoids multiple count in single document\n df += Counter(set(tokens))", "def _save_txt_nlp_data(db_name, collection_name, target,\n pos_ids=None, training=True):\n print('Making txt file of subset of target class')\n mc = MongoClient()\n db = mc[db_name]\n col = db[collection_name]\n target_pages = col.find({'target': target})\n df = pd.DataFrame(list(target_pages))[['_id', 'feature_union']]\n training_df = df[df['_id'].isin(pos_ids)]['feature_union']\n if training:\n with open(f'nlp_training_data/{target}_subset.txt', 'w') as fout:\n for row in training_df:\n if row != 'nan':\n fout.write(row + '\\n')\n else:\n with open(f'nlp_training_data/{target}_full.txt', 'w') as fout:\n for row in df['feature_union']:\n if row != 'nan':\n fout.write(row + '\\n')\n print('DONE!')", "def build_data(data, nr_sentences, vocab, reuters, rev):\n europarlEN_file = config.ALIGNED_EN_FILE\n europarlDE_file = config.ALIGNED_DE_FILE\n colEN = []\n rowEN = []\n dataEN = []\n\n colDE = []\n rowDE = []\n dataDE = []\n with codecs.open(europarlEN_file, 'r','utf-8') as fileEN:\n with codecs.open(europarlDE_file, 'r','utf-8') as fileDE:\n n = 0 # number of sentences already read\n for lineEN, lineDE in zip(fileEN, fileDE):\n if lineEN in ['\\n','\\r\\n'] or lineDE in ['\\n','\\r\\n']: # ignore empty lines\n continue\n if config.AVG:\n countEN = word_reps.count_words(lineEN, data.word_to_indexEN)\n countDE = word_reps.count_words(lineDE, data.word_to_indexDE)\n else:\n countEN = countDE = False\n\n sentenceEN = lineEN.lower().split()\n word_reps.sentence_to_rep(sentenceEN, data.EN, data.word_to_indexEN, colEN, rowEN, dataEN, countEN, n)\n sentenceDE = lineDE.lower().split()\n word_reps.sentence_to_rep(sentenceDE, data.DE, data.word_to_indexDE, colDE, rowDE, dataDE, countDE, n)\n\n n += 1\n if nr_sentences != 0 and n >= nr_sentences:\n break\n\n if rev:\n data.Y = csr_matrix(coo_matrix((dataEN, (rowEN, colEN)), shape = (data.EN.shape[1], n)))\n data.X = csr_matrix(coo_matrix((dataDE, (rowDE, colDE)), shape = (data.DE.shape[1], n)))\n EN = data.EN\n data.EN = data.DE\n data.DE = EN\n word_to_indexEN = data.word_to_indexEN\n data.word_to_indexEN = data.word_to_indexDE\n data.word_to_indexDE = word_to_indexEN\n else:\n data.X = csr_matrix(coo_matrix((dataEN, (rowEN, colEN)), shape = (data.EN.shape[1], n)))\n data.Y = csr_matrix(coo_matrix((dataDE, (rowDE, colDE)), shape = (data.DE.shape[1], n)))\n data.rowDE = rowDE\n data.colDE = colDE", "def clean_text(trainJSONData):\n\tcorpus = {} # this will store all the term over all documents as key ; and the value will be [index,documentFrequency]\n\tdocuments_info = {}\n\tcorpus_count = 0 # this also represent the index of each term in the corpus list ; also the dimension of the document vector space\n\tfor document in trainJSONData:\n\t\ttokens = text_process(document[\"abstract\"])\n\t\tdocumentLength = len(tokens)\n\t\ttermFrequency = {} # store the term frequency for this document\n\t\talreadyIncrement = {}\n\t\tfor token in tokens:\n\t\t\tif token not in corpus:\n\t\t\t\tcorpus_count += 1\n\t\t\t\tcorpus[token] = [corpus_count,0]\n\t\t\tif token not in termFrequency:\n\t\t\t\ttermFrequency[token] = 0\n\t\t\tif token not in alreadyIncrement:\n\t\t\t\tcorpus[token][1] += 1\n\t\t\t\talreadyIncrement[token] = None\n\t\t\ttermFrequency[token] += 1\n\t\t# store stuff to the 
document info dictionary\n\t\tdocuments_info[document[\"title\"]] = [tokens,document[\"type\"],termFrequency,documentLength]\n\n\t# represent the documents as vector over corpus space dimension\n\tDocument_vectors = []\n\tnumber_of_document = len(documents_info)\n\t# courpus_count will also be the dimension of the document vector space\n\t# for each document in the document_info\n\tfor document_name,info in documents_info.items():\n\t\t# init dimension vector\n\t\t# each document vector entry will be the tf-idf weight of corresponding position term\n\t\tdocument_vector = np.zeros(corpus_count) # 1 x corpus_count vector\n\t\tterms_in_document = info[0]\n\t\tClass = info[1]\n\t\ttermFrequency = info[2]\n\t\tdocumentLength = info[3]\n\t\tfor term in terms_in_document:\n\t\t\tdocumentFrequency = corpus[term][1]\n\t\t\tindex = corpus[term][0]\n\t\t\t# compute the tf-idf score and put it into the corresponding vector entriy\n\t\t\tdocument_vector[index-1] = compute_tf_idf_weight(termFrequency[term], documentFrequency, number_of_document)\n\t\tDocument_vectors.append([document_name, document_vector, Class])\n\n\treturn Document_vectors, corpus, number_of_document, corpus_count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a queue of RDDs that will be mapped/reduced one at a time in 1 second intervals.
def process_rdd_queue(twitter_stream, nb_tweets = 5): rddQueue = [] for i in range(nb_tweets): json_twt = get_next_tweet(twitter_stream, i ) dist_twt = ssc.sparkContext.parallelize([json_twt], 5) rddQueue += [dist_twt] lines = ssc.queueStream(rddQueue, oneAtATime=False) lines.pprint()
[ "def create_queue(self, queue):", "def fill_queue(orders_of_the_day, queue_of_the_day):\n for order in orders_of_the_day:\n queue_of_the_day.enqueue(order)\n return queue_of_the_day", "def run_through_queue(arrival_epoch):\n\n MODE = 'single' # 'single' or 'changing_multi' or 'const_multi'\n n_sample = len(arrival_epoch)\n n_arrival_each_sample = [len(arrival_epoch[i]) for i in range(n_sample)]\n wait_time_c_list = [ffi.new('float[]', n_arrival_each_sample[i])\n for i in range(n_sample)]\n\n n_server = 5\n wait_time = np.empty(n_sample, dtype=list)\n for i in range(n_sample):\n wait_time_c = ffi.new('float[]', n_arrival_each_sample[i])\n const_multi_server_queue(ffi.new(\"float[]\", arrival_epoch[i]),\n ffi.new(\"float[]\", np.random.exponential(\n scale=10000, size=n_arrival_each_sample[i]).tolist()),\n wait_time_c,\n n_server,\n n_arrival_each_sample[i])\n wait_time[i] = list(wait_time_c)\n\n return wait_time", "def work_queues(strategy=integers):\n return lists(strategy()).map(list_to_queue)", "def fill_batch_queue(self):\n while True:\n if self.hps.mode != \"predict\":\n # Get bucketing_cache_size-many batches of Examples into a list, then sort\n inputs = []\n for _ in xrange(self.hps.batch_size * self.bucketing_cache_size):\n inputs.append(self.example_queue.get())\n # sort by length of encoder sequence\n inputs = sorted(inputs, key=lambda inp: inp.len)\n\n # Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.\n batches = []\n for i in xrange(0, len(inputs), self.hps.batch_size):\n batches.append(inputs[i:i + self.hps.batch_size])\n if not self.single_pass:\n shuffle(batches)\n for b in batches: # each b is a list of Example objects\n self.batch_queue.put(Batch(b, self.vocab, self.hps))\n else: # predict mode\n b = []\n for _ in xrange(self.hps.batch_size):\n b.append(self.example_queue.get())\n self.batch_queue.put(Batch(b, self.vocab, self.hps))", "def make_new_batch(in_queue, out_queue, batch_size):\n batches, num_samples = [], 0\n while True:\n batch_samples = in_queue.get()\n in_queue.task_done()\n if not isinstance(batch_samples, EndSignal):\n cur_num_samples = list(batch_samples.values())[0].shape[0]\n if num_samples + cur_num_samples < batch_size:\n batches.append(batch_samples)\n num_samples += cur_num_samples\n elif num_samples + cur_num_samples == batch_size:\n batches.append(batch_samples)\n out_queue.put(concat_batches(batches))\n batches, num_samples = [], 0\n else:\n num_splited = batch_size - num_samples\n first, second = split_batch(batch_samples, num_splited)\n batches.append(first)\n out_queue.put(concat_batches(batches))\n num_left = cur_num_samples - num_splited\n while num_left > batch_size:\n first, second = split_batch(second, batch_size)\n out_queue.put(first)\n num_left -= batch_size\n\n if num_left == batch_size:\n out_queue.put(second)\n batches, num_samples = [], 0\n else:\n batches, num_samples = [second], num_left\n else:\n if len(batches) > 0:\n out_queue.put(concat_batches(batches))\n out_queue.put(EndSignal())\n break", "def generateBatches(self):\n worker = Thread(target=self._generateBatches, args=())\n worker.setDaemon(True)\n worker.start()", "def executeRequests(self):\r\n for i in self.processQueue.queue:\r\n self.allocateMemory(i.pID, i.size//4)\r\n self.processQueue.queue = []", "def _create_scrape_queue(self) -> None:\n self._out(\"Creating scrape queue...\\n\")\n\n for url in tqdm(self._abs_endpoints):\n req = requests.get(url)\n\n if not req.ok:\n self._out(f\"Failed to GET {url}. 
({str(req.status_code)})\")\n continue\n\n # Since we are accessing the generated (escaped) HTML of each\n # endpoint, we need to unescape it using a helper which replaces\n # the backslashes in order to to parse it with BeautifulSoup.\n html_unescaped = self._unescape(req.text)\n\n bs4 = self._bs4(html_unescaped)\n\n page_count = bs4.find(\n \"ul\", class_=\"pagination\"\n ).find_all(\"li\")[-1].string\n\n page_categories = bs4.find(\n \"td\", class_=\"td--nowrap\"\n ).find_all_previous(\"td\", class_=\"td__spec\")\n\n category_text: lambda cat: self._json_safe(\n cat.find(\"h6\", class_=\"specLabel\").string\n )\n\n categories = [category_text(c) for c in page_categories]\n\n self._scrape_queue.append(\n {\n \"url\": url,\n \"categories\": [c for c in reversed(categories)],\n \"page_count\": int(page_count)\n }\n )", "def import_data_queue():\n logger.info('Queue run.')\n\n colls = ('product.csv', 'customers.csv')\n\n for col in colls:\n worker = threading.Thread(target=_read_data_create_collection,\n args=(col,))\n worker.daemon = True\n worker.start()\n worker.join()\n\n return PROCESS_RESULT", "def create(parallel):\n queue = {k: v for k, v in parallel.items() if k in [\"queue\", \"cores_per_job\", \"mem\"]}\n yield queue", "def refresh_queue(self):\n #print(\"REF Q\")\n now_s = time.time()\n state = self.get_state()\n queue = self.queue = self.get_queue()\n for probe in self.get_probes():\n name = probe['name']\n if not name in queue:\n logger.debug(\"Adding entry for %s\", name)\n sched = self.cfg['schedules'][probe['schedule']]\n sched_st = self.mk_sched_entry(\n probe['name'],\n t_next=now_s,\n schedule=sched,\n )\n queue.add(sched_st)\n #print(\"Q: \", self.queue)\n s_queue = OrderedDict()\n for key, val in sorted(queue.items(), \n key=lambda key_val: (key_val[1]['t_next'], key_val[1]['interval'])):\n s_queue[key] = val\n self.queue = s_queue\n #print(\"SQ: \", s_queue)", "def _fill(self):\n\n self.queue.append((self.generator, -1))\n\n self.clock_ticks = 0\n while len(self.queue) > 0:\n self.clock_ticks += 1\n self._fill_at_once(len(self.queue))\n\n if self.show_progress:\n msg = f\"--- Oxygen is spreading, {self.clock_ticks} ---\\n\"\n msg += self.board.visualize()\n print(msg)\n time.sleep(0.4)", "def next_update(self):\n\n while True:\n if self.bandwidth:\n self.bandwidth = 0\n self.burst = 0\n else:\n self.bandwidth = 20*1024*1024\n self.burst = self.bandwidth\n yield (datetime.datetime.now() + datetime.timedelta(minutes=20), self.bandwidth, self.burst)", "def generate_from_queue(self):\n while True:\n yield self.input_queue.get()", "def list_to_queue(lst):\n work = Work()\n for elem in lst:\n work.put(elem)\n return work", "def create_parallel_1(self):\n for i in range(self.q):\n self.create_blocks(i)\n self.classes[i] = self.blocks", "def run_xtimes_tfilters(self):\n for _ in xrange(self.samples):\n i_buffer = self.rng.randint(0, self.buffer.size)\n rn_packets = self.buffer[i_buffer]\n for t_filter in self.t_filters:\n if rn_packets is None:\n break\n rn_packets = t_filter(rn_packets)\n\n if rn_packets != None:\n self.out_queue.put(rn_packets)", "def distribute_thread(self, smrf_queue, data_queue):\n self._logger.info(\"Distributing {}\".format(self.variable))\n\n for date_time in self.date_time:\n\n data = data_queue[self.variable].get(date_time)\n self.distribute(data)\n smrf_queue[self.variable].put([date_time, self.air_temp])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resizes image to target size progressively. Different from normal resize, this function will reduce the image size progressively. In each step, the maximum reduce factor is 2.
def progressive_resize_image(image, size): if not isinstance(image, np.ndarray): raise TypeError(f'Input image should be with type `numpy.ndarray`, ' f'but `{type(image)}` is received!') if image.ndim != 3: raise ValueError(f'Input image should be with shape [H, W, C], ' f'but `{image.shape}` is received!') height, width, channel = image.shape assert height == width assert height >= size num_iters = int(np.log2(height) - np.log2(size)) for _ in range(num_iters): height = max(height // 2, size) image = cv2.resize(image, (height, height), interpolation=cv2.INTER_LINEAR) assert image.shape == (size, size, channel) return image
[ "def resize_image(image, size):\n return skimage.transform.resize(image, size, preserve_range=True)", "def rescaled_image():", "def _resize(self):\n avg_frames = 87 #this is the average image frame length in the entire dataset\n for i in range(len(self.data)):\n image = self.data[i]\n self.data[i] = resize(image, width=avg_frames, height=len(image))", "def rescale(inputs, max_size=IMG_MAX_SIZE, itp_name='BI'):\n is_tensor = type(inputs) is torch.Tensor\n if is_tensor:\n h, w = inputs.shape[1:]\n else:\n w, h = inputs.size\n\n print(h, w, max_size)\n ratio = max_size / max(h, w)\n\n if ratio == 1:\n return inputs\n\n return T.Compose([\n __tf_if__(T.ToPILImage(), is_tensor),\n T.Resize((round(h * ratio), round(w * ratio)), interpolation=__get_interpolation__(itp_name)),\n __tf_if__(T.ToTensor(), is_tensor),\n ])(inputs)", "def Expand(image):\r\n\treturn resize_image(image, 2)", "def _resize_pillars(self):\n self.image = pygame.transform.smoothscale(self.image, (100, 650))", "def main_shrink_resolution():\n img = cv2.imread(IMAGE_GRAY)\n images = [(n, shrink_resolution(img, n)) for n in (3,5,7,20,100)]\n show_images(images)", "def scale_image(self, image, new_width=100):\n (original_width, original_height) = image.size\n aspect_ratio = original_height/float(original_width)\n new_height = int(aspect_ratio * new_width)\n\n new_image = image.resize((new_width, new_height))\n return new_image", "def scale(img, factor):\n interpolation = cv2.INTER_AREA if factor <= 1 else cv2.INTER_LANCZOS4\n\n return cv2.resize(img, dsize=(0, 0), fx=factor, fy=factor, interpolation=interpolation)", "def Reduce(image):\r\n\t# applies gaussian smoothing kernel to image before reducing\r\n\treturn resize_image(smoothed_image(image), 0.5)", "def resize(self, newSize):\n\n\t\tif self.kwargs[\"borderSize\"]:\n\t\t\tself.image = stretch_image(self.image, newSize, \\\n\t\t\tself.kwargs[\"borderSize\"])\n\t\telse:\n\t\t\tself.image = resize_image(self.image, newSize, \\\n\t\t\t\tself.kwargs[\"antialiasing\"])\n\t\tself.kwargs[\"size\"] = tuple(newSize)", "def resize(src, dsize, dst=..., fx=..., fy=..., interpolation=...) 
-> dst:\n ...", "def resize_by_width(image, target_width, interpolation=cv2.INTER_LANCZOS4):\n src_height, src_width = image.shape[0:2]\n if target_width == src_width:\n # There is nothing to do\n return image\n target_height = int(round(target_width * src_height / src_width))\n return cv2.resize(image, (target_width, target_height), interpolation=interpolation)", "def shrink(image_generator, factor=2, fast=False):\n if fast:\n fast = factor = int(factor) # Need this....?\n go_fast = fast and int_factor == factor\n for image in image_generator:\n if go_fast:\n yield image[::int(factor), ::int(factor)]\n else:\n yield pyramid.pyr_down(image, factor).astype(numpy.uint8)", "def _resize_img(self, results):\n img = results[\"img\"]\n if self.keep_aspect:\n img, _ = mmcv.imrescale(img, results[\"scale\"], return_scale=True)\n new_h, new_w = img.shape[:2]\n h, w = results[\"img\"].shape[:2]\n w_scale = new_w / w\n h_scale = new_h / h\n else:\n img, w_scale, h_scale = mmcv.imresize(\n img, results[\"scale\"], return_scale=True\n )\n results[\"img\"] = img\n\n scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], dtype=np.float32)\n results[\"img_shape\"] = img.shape\n results[\"pad_shape\"] = img.shape\n results[\"scale_factor\"] = scale_factor\n results[\"keep_aspect\"] = self.keep_aspect", "def upscale_old_images():\n raw_path = join(ROOT, '_raw_images')\n tmp_path = join(ROOT, '_raw_images/tmp')\n os.makedirs(tmp_path, exist_ok=True)\n\n image_names = os.listdir(join(ROOT, '_raw_images'))\n for image in image_names:\n try:\n image_width = Image.open(join(raw_path, image)).size[0]\n except (UnidentifiedImageError, IsADirectoryError):\n continue\n if image_width == 600:\n print(f\"Upscaling {image}\")\n upscale = (r\"convert -resize 720x720 \"\n f'\"{join(raw_path, image)}\" \"{join(tmp_path, image)}\"')\n subprocess.Popen(upscale, shell=True).wait()", "def resize_image(img, new_width, new_height):\n img_new = img.resize((new_width, new_height))\n return img_new", "def resize_image(image: Image.Image, scale_factor: int) -> Image.Image:\n if scale_factor == 1:\n return image\n return image.resize((image.width // scale_factor, image.height // scale_factor), resample=Image.BILINEAR)", "def image_rescale(img, final_dim: tuple):\n rescale_factor = (final_dim[0] / img.shape[1], final_dim[1] / img.shape[0])\n img_rescaled = cv2.resize(img, final_dim)\n return img_rescaled, rescale_factor" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Specialized record with correlation_id.
def makeRecord(self, *args, **kwargs): rv = super(LEGALogger, self).makeRecord(*args, **kwargs) # Adding correlation_id if not already there if 'correlation_id' in rv.__dict__.keys(): return rv rv.__dict__['correlation_id'] = _cid.get() or '--------' return rv
[ "def gen_record(document_id, primary_doc, gen_links):\n pass", "def _patient_wrapper(row):\n from bhoma.apps.patient.models import CPatient\n data = row.get('value')\n docid = row.get('id')\n doc = row.get('doc')\n if not data or data is None:\n return row\n if not isinstance(data, dict) or not docid:\n return row\n else:\n if 'rev' in data:\n data['_rev'] = data.pop('rev')\n case = cls.wrap(data)\n case.patient = None\n if doc == None:\n # there's (I think) a bug in couchdb causing these to come back empty\n try:\n doc = CPatient.get_db().get(docid)\n except Exception, e:\n pass\n if doc and doc.get(\"doc_type\") == \"CPatient\":\n case.patient = CPatient.wrap(doc)\n return case", "def gen_record_item(record: RecordType):\n raise NotImplementedError", "def get_relationship_record(self, relationship_record_type):\n return # osid.relationship.records.RelationshipRecord", "def test_correlation_id():\n _user_logging(\n {'X-CorrelationID': '298ebf9d-be1d-11e7-88ff-2c44fd152860'},\n {},\n {'correlation_id': v_str('298ebf9d-be1d-11e7-88ff-2c44fd152860')},\n True\n )", "def get_record(self):\n return get_record(self.id)", "def create_custom_id(self):\n if self.id:\n return self.id\n\n notable_raw_data = self.data.get('_raw', '')\n raw_hash = hashlib.md5(notable_raw_data).hexdigest() # nosec\n\n if self.time_is_missing and self.index_time:\n notable_custom_id = '{}_{}'.format(self.index_time, raw_hash) # index_time stays in epoch to differentiate\n demisto.debug('Creating notable custom id using the index time')\n else:\n notable_custom_id = '{}_{}'.format(self.occurred, raw_hash)\n\n return notable_custom_id", "def get_record(self, domain, record):\r\n return domain.get_record(record)", "def getRecord(metadataPrefix, identifier):", "def add_correlation_key(self, name: str, value: typing.Any, type_: str = None) -> None:\n self.correlation_keys[name] = {'value': value, 'type': type_}", "def annotate_record(self, record, variant_result):\n record.INFO['variant_id'] = variant_result.variant_id\n record.INFO['gene'] = \",\".join(variant_result.genes)\n record.INFO['gnomad_exomes_AF'] = variant_result.gnomad_exomes_af\n record.INFO['gnomad_genomes_AF'] = variant_result.gnomad_genomes_af\n record.ALT = variant_result.alt\n record.POS = variant_result.pos\n record.ID = \";\".join(variant_result.rs_ids) or \".\"\n return record", "def create_record(self, context, domain_id, values):", "def getRecord(self, metadataPrefix, identifier):\n if metadataPrefix and not (metadataPrefix in self.protocolMap.recordNamespaces):\n raise CannotDisseminateFormatError()\n \n if not self.metadataRegistry.hasWriter(metadataPrefix):\n # need to create a 'MetadataWriter' for this schema for oaipmh to use, and put in self.metadataRegister\n schemaId = self.protocolMap.recordNamespaces[metadataPrefix]\n txr = self.protocolMap.transformerHash.get(schemaId, None)\n mdw = Cheshire3OaiMetadataWriter(txr)\n self.metadataRegistry.registerWriter(metadataPrefix, mdw)\n \n q = cqlparse('rec.identifier exact \"%s\"' % (identifier))\n try:\n rs = self.db.search(session, q)\n except SRWDiagnostics.Diagnostic16:\n raise ConfigFileException('Index map for rec.identifier required in protocolMap: %s' % self.db.get_path(session, 'protocolMap').id)\n \n if not len(rs) or len(rs) > 1:\n raise IdDoesNotExistError('%s records exist for this identifier' % (len(rs)))\n \n r = rs[0] \n rec = r.fetch_record(session)\n # now reverse lookup lastModificationDate\n q = cqlparse('rec.lastModificationDate < \"%s\"' % (datetime.datetime.utcnow()))\n pm = 
self.db.get_path(session, 'protocolMap') # get CQL ProtocolMap\n idx = pm.resolveIndex(session, q)\n vector = idx.fetch_vector(session, rec)\n term = idx.fetch_termById(session, vector[2][0][0])\n try:\n datestamp = datetime.datetime.strptime(term, '%Y-%m-%dT%H:%M:%S')\n except ValueError:\n datestamp = datetime.datetime.strptime(term, '%Y-%m-%d %H:%M:%S')\n return (Header(str(r.id), datestamp, [], None), rec, None)", "def create_correlation_data(version, correlated_entity=\"subtopics\"):\n # first check if the Correlation object with same version exists or not\n try:\n Correlation.objects.get(version=str(version))\n raise Exception(\"Correlation object of version {} already exists. Provide another version\".format(version))\n except Correlation.DoesNotExist:\n pass\n\n classified_documents = ClassifiedDocument.objects.all()\n classified_documents = [(x.classification_label, x.text) for x in classified_documents]\n correlation = get_documents_correlation(classified_documents)\n correlation_obj = Correlation.objects.create(\n correlated_entity=correlated_entity,\n version=version,\n correlation_data=correlation\n )\n return correlation_obj", "def createNew(self,request_pkt,reply_pkt,ras_obj):\n\tnew_ras_msg=RasMsg(request_pkt,reply_pkt,ras_obj)\n\tnew_ras_msg[\"unique_id\"]=self[\"unique_id\"]\n\tnew_ras_msg[self[\"unique_id\"]]=self.getUniqueIDValue()\n\treturn new_ras_msg", "def from_db_response(cls, record: Dict[str, Any]) -> BaseModel:\n raise NotImplementedError", "def correlationIdAt(self, index):\n errorCode, cid = internals.blpapi_ResolutionList_correlationIdAt(\n self.__handle,\n index)\n _ExceptionUtil.raiseOnError(errorCode)\n return cid", "def set_correlation(\n name: str, value: object, context: typing.Optional[Context] = None\n) -> Context:\n correlations = get_correlations(context=context)\n correlations[name] = value\n return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)", "def raw(self, oid):\n return Raw(self, oid)", "def serialized_id(self) -> str:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yields EXPLAIN result rows for given queries
def explain_queries(database, queries): # analyze only SELECT queries from the log for query in filter(is_select_query, queries): try: for row in database.explain_query(query): table_used = row['table'] index_used = row['key'] yield (query, table_used, index_used, row) except IndexDigestError: logger = logging.getLogger('explain_queries') logger.error('Cannot explain the query: %s', query)
[ "def explain_queries(database, queries):\n # analyze only SELECT queries from the log\n for query in filter(is_select_query, queries):\n for row in database.explain_query(query):\n table_used = row['table']\n index_used = row['key']\n\n yield (query, table_used, index_used, row)", "def used_indexes(query, using=DEFAULT_DB_ALIAS):\n connection = connections[using]\n with connection.cursor() as cursor:\n cursor.execute(\"EXPLAIN \" + query)\n\n return {row[\"key\"] for row in fetchall_dicts(cursor) if row[\"key\"] is not None}", "def supports_explaining_query_execution(self):\n return self.connection.ops.explain_prefix is not None", "def main():\n idx = index(os.path.join('collection/'))\n\n print(idx.exact_query(['with', 'without', 'yemen'], 10))\n print(idx.inexact_query_champion(['with', 'without', 'yemen'], 10))\n print(idx.inexact_query_index_elimination(['with', 'without', 'yemen'], 10))\n print(idx.inexact_query_cluster_pruning(['with', 'without', 'yemen'], 10))\n\n print(idx.exact_query(['with', 'without', 'yemen', 'yemeni'], 10))\n print(idx.inexact_query_champion(['with', 'without', 'yemen', 'yemeni'], 10))\n print(idx.inexact_query_index_elimination(['with', 'without', 'yemen', 'yemeni'], 10))\n print(idx.inexact_query_cluster_pruning(['with', 'without', 'yemen', 'yemeni'], 10))\n\n print(idx.exact_query(['berlin', 'poland', 'szczecin', 'obacz', 'plane'], 10))\n print(idx.inexact_query_champion(['berlin', 'poland', 'szczecin', 'obacz', 'plane'], 10))\n print(idx.inexact_query_index_elimination(['berlin', 'poland', 'szczecin', 'obacz', 'plane'], 10))\n print(idx.inexact_query_cluster_pruning(['berlin', 'poland', 'szczecin', 'obacz', 'plane'], 10))\n\n print(idx.exact_query(['abc', 'pqr', 'xyz'], 10))\n print(idx.inexact_query_champion(['abc', 'pqr', 'xyz'], 10))\n print(idx.inexact_query_index_elimination(['abc', 'pqr', 'xyz'], 10))\n print(idx.inexact_query_cluster_pruning(['abc', 'pqr', 'xyz'], 10))\n\n print(idx.exact_query(['million', 'billion'], 10))\n print(idx.inexact_query_champion(['million', 'billion'], 10))\n print(idx.inexact_query_index_elimination(['million', 'billion'], 10))\n print(idx.inexact_query_cluster_pruning(['million', 'billion'], 10))\n # idx.print_dict()\n # idx.print_doc_list()", "def _slow_search(self, qs):\n for k,d in self.by_created(desc=True):\n # rip through each document in the db...\n # performing the queries on each one\n matched_all = True\n for q in qs:\n # on each document, first see if the key even exists\n qk = q.key\n if qk not in d:\n matched_all = False\n break\n\n # now check each query against the doc\n dv = d.get(qk)\n for op, val in q._and_ops:\n if not op(dv, val):\n matched_all = False\n break\n # if not all( op(dv, val) for op,val in q._and_ops ):\n # matched_all = False\n # break\n if matched_all:\n # if we got here, we matched all queries\n yield k, d", "def table_index_usage():\n query_table_index_usage(current_app.extensions['sqlalchemy'].db)", "def run_analytical_queries(engine, n):\n cur = engine.cursor()\n\n for q_name, q in analytical_queries.items():\n # Discard a query in case there's an initial connect time\n with Timer(enter_message=f\"Running a junk query in case there's an initial connect time\",\n exit_message=\"\\t--> junk query done\",\n print_function=logger.info,\n ):\n cur.execute(discardable_query)\n with Timer(enter_message=f\"Running query {q_name} {n} times\", exit_message=\"\\t--> batch run done\",\n print_function=logger.info,\n ):\n logger.debug(f\"query definition:\\n{q}\")\n for i in range(n):\n with 
Timer(exit_message=f\"\\t--> {i}\",\n print_function=logger.info,\n ):\n cur.execute(q)", "def _prefix_explain(query: str) -> str:\n\treturn \"EXPLAIN (FORMAT GRAPHVIZ)\\n {0}\".format(query)", "def queries_popular():\n query_queries_popular(current_app.extensions['sqlalchemy'].db)", "def printExplain(self, server, ns):\n explainOutput = server.explainQuery(ns, self.__parts)\n if not explainOutput:\n return False\n\n print()\n print('Cursor:', explainOutput['cursor'])\n print('Indexes:', end=' ')\n for index in explainOutput['indexBounds']:\n print(index, end=' ')\n print()\n print('IndexOnly:', explainOutput['indexOnly'])\n print('MultiKey:', explainOutput['isMultiKey'])\n print('Miliseconds:', explainOutput['millis'])\n print('Documents:', explainOutput['n'])\n print('ChunkSkips:', explainOutput['nChunkSkips'])\n print('Yields:', explainOutput['nYields'])\n print('Scanned:', explainOutput['nscanned'])\n print('ScannedObjects:', explainOutput['nscannedObjects'])\n if 'scanAndOrder' in explainOutput:\n print('ScanAndOrder:', explainOutput['scanAndOrder'])\n\n return True", "def test_psycopg2_composable_query_works(instrument, postgres_connection, elasticapm_client):\n from psycopg2 import sql\n\n cursor = postgres_connection.cursor()\n query = sql.SQL(\"SELECT * FROM {table} WHERE {row} LIKE 't%' ORDER BY {row} DESC\").format(\n table=sql.Identifier(\"test\"), row=sql.Identifier(\"name\")\n )\n baked_query = query.as_string(cursor.__wrapped__)\n result = None\n try:\n elasticapm_client.begin_transaction(\"web.django\")\n cursor.execute(query)\n result = cursor.fetchall()\n elasticapm_client.end_transaction(None, \"test-transaction\")\n finally:\n # make sure we've cleared out the spans for the other tests.\n assert [(2, \"two\"), (3, \"three\")] == result\n transactions = elasticapm_client.events[TRANSACTION]\n spans = elasticapm_client.spans_for_transaction(transactions[0])\n span = spans[0]\n assert span[\"name\"] == \"SELECT FROM test\"\n assert \"db\" in span[\"context\"]\n assert span[\"context\"][\"db\"][\"instance\"] == \"elasticapm_test\"\n assert span[\"context\"][\"db\"][\"type\"] == \"sql\"\n assert span[\"context\"][\"db\"][\"statement\"] == baked_query", "def yield_table_query(self, engine: Engine) -> Iterator[TableQuery]:\n with engine.connect() as conn:\n rows = conn.execute(\n self.get_sql_statement(\n start_time=self.start,\n end_time=self.end,\n )\n )\n for row in rows:\n query_dict = dict(row)\n try:\n yield TableQuery(\n query=query_dict[\"query_text\"],\n databaseName=self.get_database_name(query_dict),\n serviceName=self.config.serviceName,\n databaseSchema=self.get_schema_name(query_dict),\n )\n except Exception as exc:\n logger.debug(traceback.format_exc())\n logger.warning(f\"Error processing query_dict {query_dict}: {exc}\")", "def query_and_fetchall(self, query):\n with vertica_python.connect(**conn_info) as conn:\n cur = conn.cursor()\n cur.execute(query)\n\n return cur.fetchall()", "def query(cql: str, session: cassandra.cluster.Session, print_result: bool = True):\n res = [e for e in session.execute(cql)]\n \n if print_result:\n pprint(res)\n \n return res", "def get_popular_queries(self, spec):\n cond = {'counter':{'$exists':True}}\n for row in self.col.find(fields=['qhash'], spec=cond).\\\n sort('counter', DESCENDING):\n spec = {'qhash': row['qhash'], 'counter':{'$exists': False}}\n for res in self.col.find(spec=spec):\n yield res", "def indexes(\n ctx,\n path,\n tables,\n aux,\n nl,\n arrays,\n csv,\n tsv,\n no_headers,\n table,\n fmt,\n json_cols,\n 
load_extension,\n):\n sql = \"\"\"\n select\n sqlite_master.name as \"table\",\n indexes.name as index_name,\n xinfo.*\n from sqlite_master\n join pragma_index_list(sqlite_master.name) indexes\n join pragma_index_xinfo(index_name) xinfo\n where\n sqlite_master.type = 'table'\n \"\"\"\n if tables:\n quote = sqlite_utils.Database(memory=True).quote\n sql += \" and sqlite_master.name in ({})\".format(\n \", \".join(quote(table) for table in tables)\n )\n if not aux:\n sql += \" and xinfo.key = 1\"\n ctx.invoke(\n query,\n path=path,\n sql=sql,\n nl=nl,\n arrays=arrays,\n csv=csv,\n tsv=tsv,\n no_headers=no_headers,\n table=table,\n fmt=fmt,\n json_cols=json_cols,\n load_extension=load_extension,\n )", "def run_query(conn, query):\n with conn.cursor(as_dict=True) as cursor:\n cursor.execute(query)\n for row in cursor:\n yield row", "def queries_long_running():\n query_queries_long_running(current_app.extensions['sqlalchemy'].db)", "def do_show_plan(args):\n # Expand the query template.\n ns = setup_namespace(args.json_params)\n q = query_template.expand_file(args.qt_filename, ns)\n # Get the Redshift connection.\n conn = get_connection(args)\n cs = conn.cursor()\n # Set the query_group.\n conn_args = get_conn_args(args)\n query_group = _pick_query_group(args, conn_args)\n if query_group:\n cs.execute(\"SET query_group TO '%s';\" % (query_group,))\n logger.info(\"SET query_group TO '%s';\" % (query_group,))\n # Set the search_path.\n search_path = conn_args.get(\"search_path\")\n if search_path is not None:\n cs.execute(\"SET search_path TO %s;\" % (search_path,))\n logger.info(\"SET search_path TO %s;\" % (search_path,))\n # Run the explain.\n cs.execute(\"explain \"+q)\n # Write the plan to stdout.\n while 1:\n row = cs.fetchone()\n if row is None:\n break\n print row[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Randomizes days of week and hours when lectures will take place
def set_lectures_time(self, min_hour=8, max_hour=19, days=[1,2,3,4,5]): print("--- set lectures time ---") dict_lectures, dict_group_lectures = self.prepare_lectures() for sch_subject_list in dict_group_lectures.values(): tries = HOW_MANY_TRIES while tries > 0: when_start = randint(min_hour, max_hour) which_day = choice(days) sch_subject_list[0].whenStart = time(when_start, 0, 0) sch_subject_list[0].dayOfWeek = which_day sch_subject_list[0].whenFinnish = time(when_start + sch_subject_list[0].how_long, 0, 0) check_for_this_key = "" for key, value in dict_lectures.items(): if value.compare_to(sch_subject_list[0]): check_for_this_key = key break if self.check_event_can_be_set(event=sch_subject_list[0], event_id=check_for_this_key, dict_of_subjects=dict_lectures): for sch_subject in sch_subject_list: sch_subject.whenStart = time(when_start, 0, 0) sch_subject.dayOfWeek = which_day sch_subject.whenFinnish = time(when_start + sch_subject_list[0].how_long, 0, 0) break tries -= 1 if tries == 0: raise Exception("lectures cannot be set!")
[ "def gen_modelled_date(start_date, end_date):\n # 2012, 2013, 2014\n year_model = [1, 2, 4]\n year_model = reduce(lambda x, y: x+y, [[year]*freq for year, freq in\n zip(range(2012, 2015), year_model)])\n rand_year = random.choice(year_model)\n\n\n # J F M A M J J A S O N D\n month_model = [1, 4, 8, 9, 7, 5, 4, 6, 8, 12, 10, 6]\n month_model = reduce(lambda x, y: x+y, [[month]*freq for month, freq in\n zip(range(1, 13), month_model)])\n rand_month = random.choice(month_model)\n\n week_dict = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: []} \t\n num_days_in_month = monthrange(rand_year, rand_month)[1]\n\n for day in range(1, num_days_in_month+1):\n week_dict[datetime.date(rand_year, rand_month, day).weekday()] += [day] \n \n\n # M T W R F S S\n week_model = [2, 1, 1, 2, 4, 8, 3]\n week_model = reduce(lambda x, y: x+y, [[week]*freq for week, freq in\n zip(range(7), week_model)])\n rand_day = random.choice(week_dict[random.choice(week_model)])\n\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20\n # 21 22 23\n hour_model = [1, 1, 1, 1, 1, 1, 2, 9, 7, 5, 2, 1, 1, 2, 2, 3, 4, 14,\n 10, 8, 6, 3, 1, 1]\n hour_model = reduce(lambda x, y: x+y, [[hour]*freq for hour, freq in\n zip(range(24), hour_model)])\n rand_hour = random.choice(hour_model)\n \n rand_minute = random.choice(range(60))\n\n rand_second = random.choice(range(60))\n \n random_timestamp_arr = [rand_year, rand_month, rand_day, rand_hour,\n rand_minute, rand_second]\n return random_timestamp_arr", "def random_festival_datetime():\n era = random.choice(range(len(telisaran.Era.years)))\n max_year = 20000 if era == 2 else telisaran.Era.years[era]\n return telisaran.datetime(\n era=era + 1,\n year=random.choice(range(1, max_year + 1)),\n season=9,\n day=random.choice(range(1, telisaran.FestivalOfTheHunt.length_in_days + 1)),\n hour=random.choice(range(24)),\n minute=random.choice(range(60)),\n second=random.choice(range(60))\n )", "def random_date(start, end):\n random_time = start + timedelta(\n seconds=randint(0, int((end - start).total_seconds())),\n )\n hour = numpy.random.choice(hours, p=probabilities)\n return random_time.replace(hour=hour)", "async def random_day(self, ctx):\n # Note: day command invokes this command\n await ctx.embed_reply(random.choice(calendar.day_name))", "def gen_date():\r\n return random.randint(DAY1, TODAY)", "def d12():\n\treturn random.randint(1, 12)", "def week():", "def sim_day(students, infects, day=0, p=0.02):\n if weekend_check and day != 0 and (day+1) % 6 == 0:\n #do nothing\n # print(f'Day = {day}: 6 do nothing')\n pass\n elif weekend_check and day != 0 and (day+1) % 7 == 0:\n #do nothing\n # print(f'Day = {day}: 7 do nothing')\n pass\n else:\n # print(f'Day = {day}: do the thing')\n for stu in range(len(stu_array)):\n if students[stu, 0] == 1 and students[stu,1] in range(1, 4): # Student has caught the flu and is contagious\n for peer in range(len(students)):\n if students[peer, 0] == 0 and stu != peer: # Peer is susceptible to flu except on weekends\n rand = np.random.uniform(0,1)\n if rand <= p: # Peer gets infected\n students[peer, 0] = 1\n infects += 1\n return students, infects", "def random_datetime():\n era = random.choice(range(len(telisaran.Era.years)))\n max_year = 20000 if era == 2 else telisaran.Era.years[era]\n return telisaran.datetime(\n era=era + 1,\n year=random.choice(range(1, max_year + 1)),\n season=random.choice(range(1, telisaran.Year.length_in_seasons + 1)),\n day=random.choice(range(1, telisaran.Season.length_in_days + 1)),\n hour=random.choice(range(24)),\n 
minute=random.choice(range(60)),\n second=random.choice(range(60))\n )", "def day_night_cycle(self, env):\n while True:\n # crude daynight cycle starting from midnight, we wait for 9 hours to morning,\n # raise temperature by 2.5 to 4 degrees\n yield env.timeout(60 * 60 * 9)\n self.is_night = False\n self.temperature += random.uniform(2.5, 4)\n # from 11 to 3, raise temperature by a random number between 4 to 5\n yield env.timeout(60 * 60 * 2)\n self.temperature += random.uniform(3, 4)\n # after 3, decrease temperature by 2.5 to 4 degrees\n yield env.timeout(60 * 60 * 4)\n self.temperature -= random.uniform(2.5, 4)\n # after 6, go back to room temperature\n yield env.timeout(60 * 60 * 3)\n self.is_night = True\n self.temperature = 28.0\n # to midnight and restart again\n yield env.timeout(60 * 60 * 6)", "def randomise(self):\n self.timer = self.period * random.random()", "def test_scores_by_week_simulation(self):\n pass", "def generate_time_series_data(days: int=100, business_hours_base: float=10,\n non_business_hours_base: float=10, growth: float=1,\n autocorrelation: float=0, spikiness: float=1,\n holiday_rate: float=0, frequency: str=CountStat.DAY,\n partial_sum: bool=False, random_seed: int=26) -> List[int]:\n if frequency == CountStat.HOUR:\n length = days*24\n seasonality = [non_business_hours_base] * 24 * 7\n for day in range(5):\n for hour in range(8):\n seasonality[24*day + hour] = business_hours_base\n holidays = []\n for i in range(days):\n holidays.extend([random() < holiday_rate] * 24)\n elif frequency == CountStat.DAY:\n length = days\n seasonality = [8*business_hours_base + 16*non_business_hours_base] * 5 + \\\n [24*non_business_hours_base] * 2\n holidays = [random() < holiday_rate for i in range(days)]\n else:\n raise AssertionError(f\"Unknown frequency: {frequency}\")\n if length < 2:\n raise AssertionError(\"Must be generating at least 2 data points. \"\n f\"Currently generating {length}\")\n growth_base = growth ** (1. / (length-1))\n values_no_noise = [seasonality[i % len(seasonality)] * (growth_base**i) for i in range(length)]\n\n seed(random_seed)\n noise_scalars = [gauss(0, 1)]\n for i in range(1, length):\n noise_scalars.append(noise_scalars[-1]*autocorrelation + gauss(0, 1)*(1-autocorrelation))\n\n values = [0 if holiday else int(v + sqrt(v)*noise_scalar*spikiness)\n for v, noise_scalar, holiday in zip(values_no_noise, noise_scalars, holidays)]\n if partial_sum:\n for i in range(1, length):\n values[i] = values[i-1] + values[i]\n return [max(v, 0) for v in values]", "def weekDayOrEnd(weekday,currentDate,timeDiff, time):\r\n while True: # Trying to find a weekday within 2 weeks of currentDate\r\n lower = max(0,currentDate.timetuple().tm_yday-1-14)\r\n upper = min(364+int(isleap(currentDate.year)),currentDate.timetuple().tm_yday-1+14)\r\n randomDay = random.randint(lower,upper)\r\n newDate = time[randomDay*24*60//timeDiff]\r\n if (weekday and newDate.weekday() >= 0 and newDate.weekday() <= 4) or (not(weekday) and not(newDate.weekday() >= 0 and newDate.weekday() <= 4)):\r\n break\r\n return randomDay", "def generate_rand_7():\n\n while (True):\n # This generates a random number uniformly distributed between 1 and 24.\n # The first term is 5 times a rand num between 1 - 4, yielding {5, 10,\n # 15, 20}. The second is a rand num between 1 - 4.\n # Since the two numbers are *independent*, adding them gives a rand num\n # uniformly distributed between 1 - 24.\n # The test then rejects any number that is 21 or above. 
This is then\n # divided into 7 numbers between 1 - 7 using % 7. Since there are 21\n # numbers in the interval [1, 21] and 21 is divisble by 7, the numbers\n # between 1 and 7 will occur with equal probability.\n num = 5 * (np.random.uniform(1, 5, 1) - 1) +\\\n (np.random.uniform(1, 5, 1) - 1)\n if num[0] < 21:\n return int(num[0] % 7 + 1)", "def generateRandomWorkTime(self):\n assert self.workTime == 0\n self.workTime = self.randomGenerator.generate()\n printHandler(\"W\",self.name,\"worktime\",self.workTime)", "def test_get_sample_for_day(self):\n dates = [timezone.now() - timezone.timedelta(days=i) for i in range(5)]\n for date in dates:\n DHT.objects.create(station=self.a_dht_sample.station, humidity=A_HUMIDITY,\n temperature=self.a_dht_sample.temperature,\n heat_index=self.a_dht_sample.heat_index, date=date)\n\n samples_actual = get_samples_for_day(dates[2])\n\n num_sensors_expected = 8\n num_samples_expected = 1\n self.assertEqual(num_sensors_expected, len(samples_actual))\n for key in samples_actual['DHT']:\n self.assertEqual(num_samples_expected, len(samples_actual['DHT'][key]))", "def birthday_paradox(n, num_trials=10000):\n days = list(range(1, 366)) * 4\n days.append(366)\n same_bday_count = 0\n for _ in range(num_trials):\n birthdays = random.choices(days, k=n)\n if len(set(birthdays)) < len(birthdays):\n same_bday_count += 1\n return same_bday_count / num_trials", "def random_gen(self):\n\t\ttypes = [\"Normal\", \"Robot\", \"Ninja\", \"Fire\", \"Water\", \"Dinosaur\", \"Earth\", \"Sound\", \"Wind\", \"Darkness\", \"Light\", \"Plasma\", \"Solar\", \"Lunar\", \"Meme\", \"Magic\"]\n\t\tself._name_gen()\n\t\tself.speed = random.randint(1, 6) # All ranges here are balanced using eyeballs and hopes. And wishes.\n\t\tself.attk_pw = random.randint(0, 5)\n\t\tself.attk_type = random.choice(['physical', 'emotional'])\n\t\tself.moveType = random.choice(types)\n\t\tif self.attk_type == 'emotional':\n\t\t\tself.fp = random.randint(1, 5)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The used column for addresses in the derivation_paths table should be a zero or greater run of 1's, followed by a zero or greater run of 0's. There should be no used derivations after seeing a used derivation.
def check_addresses_used_contiguous(derivation_paths: List[DerivationPath]) -> List[str]: errors: List[str] = [] for wallet_id, dps in dp_by_wallet_id(derivation_paths).items(): saw_unused = False bad_used_values: Set[int] = set() ordering_errors: List[str] = [] # last_index = None # last_hardened = None for dp in dps: # _validate_args_addresses_used(wallet_id, last_index, last_hardened, dp) if saw_unused and dp.used == 1 and ordering_errors == []: ordering_errors.append( f"Wallet {dp.wallet_id}: " f"Used address after unused address at derivation index {dp.derivation_index}" ) if dp.used == 1: pass elif dp.used == 0: saw_unused = True else: bad_used_values.add(dp.used) # last_hardened = dp.hardened # last_index = dp.derivation_index if len(bad_used_values) > 0: errors.append(f"Wallet {wallet_id}: Bad values in 'used' column: {bad_used_values}") if ordering_errors != []: errors.extend(ordering_errors) return errors
[ "def check_unexpected_derivation_entries(\n self, wallets: List[Wallet], derivation_paths: List[DerivationPath]\n ) -> List[str]:\n\n errors = []\n wallet_id_to_type = {w.id: w.wallet_type for w in wallets}\n invalid_wallet_types = []\n missing_wallet_ids = []\n wrong_type = defaultdict(list)\n\n for d in derivation_paths:\n if d.wallet_type not in set(wt.value for wt in WalletType):\n invalid_wallet_types.append(d.wallet_type)\n if d.wallet_id not in wallet_id_to_type:\n missing_wallet_ids.append(d.wallet_id)\n elif d.wallet_type != wallet_id_to_type[d.wallet_id]:\n wrong_type[(d.hardened, d.wallet_id, d.wallet_type, wallet_id_to_type[d.wallet_id])].append(\n d.derivation_index\n )\n\n if len(invalid_wallet_types) > 0:\n errors.append(f\"Invalid wallet_types in derivation_paths table: {invalid_wallet_types}\")\n\n if len(missing_wallet_ids) > 0:\n errors.append(\n f\"Wallet IDs found in derivation_paths table, but not in users_wallets table: {missing_wallet_ids}\"\n )\n\n for k, v in wrong_type.items():\n errors.append(\n f\"\"\"{[\" \", \"un\"][int(k[0])]}hardened Wallet ID {k[1]} uses type {wallet_type_name(k[2])} in \"\"\"\n f\"derivation_paths, but type {wallet_type_name(k[3])} in wallet table at these derivation indices: {v}\"\n )\n\n return errors", "def estimate_indirect_connections():\n path = os.path.join(DATA_INTERMEDIATE, 'network_edges.shp')\n indirect_lut = gpd.read_file(path)\n\n output = []\n\n for idx, item in indirect_lut.iterrows():\n\n output.append({\n 'origin_id': item['origin_id'],\n 'dest_funth': item['dest_funth'],\n 'dest_func': item['dest_func'],\n 'dest_dist': item['dest_dist'],\n })\n\n output = pd.DataFrame(output)\n\n path = os.path.join(DATA_INTERMEDIATE, 'indirect_lut.csv')\n output.to_csv(path, index=False)", "def is_used(self, det):\n det_num = det.detNum\n if det_num is None:\n det_num = 0\n det_id = det._id\n if det_id is None:\n det_id = 0\n\n id = '%d:%d:%d' % (det.fieldId, det_num, det_id)\n return id in self.table", "def hasNonPortablePaths(self): #$NON-NLS-1$\r\n return len(self.elems) > 0", "def get_nodes_w_is_free(self):\n self.free_dict = nx.get_node_attributes(self.lattice, 'is_free')", "def _is_unused(self, path):\n for node in path:\n if node.ns.fingerprint in self._nodes_processing:\n return False\n return True", "def get_equivalent_pathway_dc_ids(decopath_ontology):\n sheets_dict = pd.read_excel(\n io=decopath_ontology,\n engine='openpyxl',\n sheet_name=None,\n usecols=[\n SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME,\n MAPPING_TYPE,\n TARGET_RESOURCE, TARGET_ID, TARGET_NAME,\n ],\n dtype=str,\n index_col=None,\n )\n\n sheets_dict.pop('equivalence_same_db', None)\n\n frames = [sheet for name, sheet in sheets_dict.items()]\n df = pd.concat(frames)\n\n # Remove kegg prefixes\n df[SOURCE_ID] = df[SOURCE_ID].str.replace('path:', '')\n df[TARGET_ID] = df[TARGET_ID].str.replace('path:', '')\n\n # Get equivalent pathways\n equivalence_df = df.loc[df[MAPPING_TYPE] == EQUIVALENT_TO]\n equivalent_pathways = equivalence_df[SOURCE_ID].to_list() + equivalence_df[TARGET_ID].to_list()\n\n # Get DC IDs for equivalent pathway\n id_to_dc_id = pd.DataFrame.from_dict({\n source_id: {'pathway_id': source_id, 'dc_id': target_id, 'dc_name': target_name}\n for source_db, source_id, source_name, mapping_type, target_db, target_id, target_name in df.values\n if mapping_type == IS_PART_OF\n if source_id in equivalent_pathways\n }, orient='index')\n\n id_to_dc_id = id_to_dc_id.reset_index(drop=True)\n\n return id_to_dc_id, df", "def are_disks_used(self):\n disks = 
[node.disk for node in self.nodes]\n for disk in disks:\n if disk:\n return True\n return False", "def requires_flush(path, path_used, was_multipath):\n # No used path happens on failed attachs, when we don't care about\n # individual flushes.\n if not path_used:\n return False\n\n path = os.path.realpath(path)\n path_used = os.path.realpath(path_used)\n\n # Need to flush this device if we used this specific path. We check\n # this before checking if it's multipath in case we don't detect it\n # being multipath correctly (as in bug #1897787).\n if path_used == path:\n return True\n\n # We flush individual path if Nova didn't use a multipath and we\n # replaced the symlink to a real device with a link to the decrypted\n # DM. We know we replaced it because it doesn't link to /dev/XYZ,\n # instead it maps to /dev/mapped/crypt-XYZ\n return not was_multipath and '/dev' != os.path.split(path_used)[0]", "def menu_dn_analysis(self):\n DNP = namedtuple('DNP', ['dn', 'partition'])\n dnps = [DNP._make((line1.directory_number, line1.partition))\n for phone in self.proxy.phones.list\n if (line1 := phone.lines.get(1))]\n\n def do_analysis(dnps: List[DNP]):\n \"\"\"\n Analysis of a set of DNs\n :param dnps:\n :return:\n \"\"\"\n # group DNs by len\n dn_by_len: Dict[int, List[str]] = defaultdict(list)\n for dnp in dnps:\n dn_by_len[len(dnp.dn)].append(dnp.dn)\n\n DNCluster = namedtuple('DNCluster', ['prefix', 'dns'])\n\n def find_clusters(prefix: str, digit_strings: List[str], total_count=None) -> List[Tuple[str, List[str]]]:\n if not prefix:\n total_count = len(digit_strings)\n if len(digit_strings[0]) <= 1:\n return []\n\n # determine DNs per next level digit\n first_digits = set()\n next_level_dns: Dict[str, List[str]] = defaultdict(set)\n for ds in digit_strings:\n first_digit = ds[0]\n first_digits.add(first_digit)\n next_level_dns[first_digit].add(ds[1:])\n first_digits = sorted(first_digits)\n total_count /= len(first_digits)\n for fd in first_digits:\n nld = sorted(next_level_dns[fd])[:10]\n output = [f'{prefix}{fd}-{ds}' for ds in nld]\n if len(next_level_dns[fd]) > 10:\n output.append('...')\n remaining_length = len(next(dn for dn in next_level_dns[fd]))\n density = 9 ** remaining_length\n\n print(\n f'prefix {prefix}-{fd}: {int(total_count)} {len(next_level_dns[fd])}/{density} digit strings: '\n f'{\", \".join(output)}')\n for fd in first_digits:\n find_clusters(prefix=f'{prefix}{fd}', digit_strings=list(next_level_dns[fd]),\n total_count=total_count)\n\n return []\n\n for dn_len in dn_by_len:\n print(f' len({dn_len}):')\n find_clusters('', dn_by_len[dn_len])\n return []\n\n # analysis of all DNS\n print('All DNs')\n do_analysis(dnps)\n\n dn_by_partition: Dict[str, List[DNP]] = defaultdict(list)\n for dnp in dnps:\n dn_by_partition[dnp.partition].append(dnp)\n\n # analysis by partition\n for partition in dn_by_partition:\n print(f'Partition \\'{partition}\\'')\n do_analysis(dn_by_partition[partition])", "def verify_dep_generation(self, ds, expected):\n dep_paths = set()\n for dep in ds:\n self.debug(dep)\n self.validate_bypass_dep(dep)\n if dep.attrs.get(\"pkg.debug.depend.fullpath\", None):\n dep_paths.update(\n dep.attrs[\"pkg.debug.depend.fullpath\"])\n else:\n # generate all paths this dep could represent\n for filename in dep.base_names:\n dep_paths.update([\n os.path.join(dir, filename)\n for dir in dep.run_paths])\n for item in expected:\n if item not in dep_paths:\n self.debug(\"Expected to see dependency on {0}\".format(\n item))\n return False\n return True", "def 
init_db_trace_directions():\n\n # TODO: potentially rewrite according to new dao API\n # for db_nodes in conf.cluster[\"public_ips\"]:\n #node_ids = [x[\"id\"] for x in conf.cluster[\"public_ips\"]]\n # Khalid Update: replace the above expression with new one\n node_ids = [x.id for x in dm.ndm.getAll() ]\n\n required_td_pairs = \\\n [p for p in [x for x in itertools.product(node_ids, node_ids) ] if p[0] != p[1]]\n\n print required_td_pairs\n existing = [ (x[1],x[2]) for x in rdm.getTraceDirections()]\n print existing\n\n for r in required_td_pairs:\n if r not in existing:\n\n td_id = rdm.insertTraceDirection(r[0], r[1])\n # rdm.insertPath(td_id, [ r[0], r[1] ] )\n\n\n\n # for src_node_id, dst_node_id in required_td_pairs:\n\n\n # print \"PAIR\", src_node_id, dst_node_id\n # td = dm.getTraceDirectionByIps(\n # dm.getEndNodeById(src_node_id)[\"ip\"],\n # dm.getEndNodeById(dst_node_id)[\"ip\"])\n\n # if not td:\n # log.warning(\"[DB] Insertin TraceDirection [%s->%s]\",\n # src_node_id, dst_node_id)\n # rdm.insertTraceDirection(src_node_id, dst_node_id)\n\n # # TODO: remove when we going to update run time state\n # dm._initTraceDirections()", "def segs2nodes(segs, logFileName, error = False):\r\n \"\"\" If no rows need calculating the code skips the calculation process and outputs the input dataframe, this is to be efficient when while looping through all the routines\"\"\"\r\n\r\n #This tests for rows that need calculating\r\n calcSegs = f.isnumber(segs) #Segments referenced to nodes that may need calculations \r\n\r\n if len(calcSegs) == 0: # There must be some rows that need calculation otherwise nothing is calculated\r\n return(segs) \r\n #only do calculations if we need to (there must be unclculated values in the base or top of the segment)\r\n elif calcSegs['baseTest'].sum() + calcSegs['topTest'].sum() > 0:\r\n #If base x,y,or z = NA AND ref is numeric and only only one number \r\n \r\n refSegs = segs[pd.isnull(segs['top x'])==False] #segments that have calculations for referencing\r\n\r\n #Move references over to ref cols and calculate x,y positions, copy complete rows to refSegs and repeat\r\n for i in calcSegs.index: #for each row that needs a caluclation if referenced to nodes\r\n for j in range(2): #this is for base vs top\r\n if j == 0:\r\n string = 'base'\r\n else:\r\n string = 'top'\r\n #If this is a node that needs calculating (is a node number depending on if base or top) \r\n if calcSegs.loc[i,'{0}Test'.format(string)] : #test variable asks if node is numeric using column created by f.isnumber above\r\n findName = calcSegs.loc[i,'{0} ref'.format(string)]\r\n if type(findName)!=str: #sometimes import from excel produces mixed variable types, need to convert to string\r\n findName = str(findName)\r\n calcSegs.at[i,'{0} ref'.format(string)] = findName\r\n if type(findName)==str:\r\n print(\"Reference variable at {0} of segment {1} converted to string\".format(string,calcSegs.loc[i,'name']))\r\n f.print2Log(logFileName,\"\\nReference variable at {0} of segment {1} converted to string\".format(string,calcSegs.loc[i,'name']))\r\n error = True\r\n else:\r\n print(\"Attempeted and failed to convert {0} of segment {1} to string\".format(string,calcSegs.loc[i,'name']))\r\n f.print2Log(logFileName,\"\\nAttempeted and failed to convert {0} of segment {1} to string\".format(string,calcSegs.loc[i,'name']))\r\n error = True\r\n \r\n nodeRow = refSegs[refSegs['top name'] == findName]\r\n\r\n if len(nodeRow) != 0: #skip if there is not matching node row and get it on the next pass\r\n if 
len(nodeRow)==1:\r\n nodeRow = nodeRow\r\n elif len(nodeRow) >1:\r\n #If they do match and none are 'mid' position then use top supplemental row \r\n if all(nodeRow['name'] == nodeRow['name'].iloc[0]) and sum(nodeRow['position']=='mid') > 0:\r\n midSegOuts = f.midSegTopLocator(nodeRow, logFileName, error) #get the most distal of the midsegment rows\r\n nodeRow = midSegOuts[0]\r\n error = midSegOuts[2]\r\n \r\n if len(nodeRow)==0:\r\n f.print2Log(logFileName,'Make sure that you lebelled supplemental measurements \"mid\" in the position column for segment {0}.'.format(findName))\r\n #If the node names do not match\r\n else: \r\n nodeRow = nodeRow.iloc[0]\r\n f.print2Log(logFileName,'\\nWarning: There were more than one node matches the ref \"{2}\" for the {0} of segment {1}, the first was used. If refencing to a segment with a supplemental measurement, make sure the position column says \"mid\" for supplemewntal rows'.format(string, calcSegs['name'].loc[i], nodeRow['top name'])) #.values\r\n error = True\r\n \r\n #Assign Referenece values\r\n RefX = float(nodeRow['top x'])\r\n RefY = float(nodeRow['top y'])\r\n RefRad = float(nodeRow['top radius'] )\r\n \r\n #set refs and position to node location baseded on top node of origin segment \r\n segs.loc[i,['{0} ref x'.format(string),'{0} x'.format(string)]] = RefX\r\n segs.loc[i,['{0} ref y'.format(string),'{0} y'.format(string)]] = RefY\r\n segs.at[i,'{0} ref radius'.format(string)] = RefRad\r\n \r\n #Calc x and y based on refs, dist, and azi\r\n posMeasures = [segs.loc[i,'{0} dist'.format(string)],segs.loc[i,'{0} azi'.format(string)],\r\n segs.loc[i,'{0} radius'.format(string)],segs.loc[i,'{0} ref type'.format(string)]]\r\n \r\n calcs = f.calcPosition(refVals = (RefX, RefY, RefRad), calcVals = posMeasures, calcType = 'segment')\r\n segs.at[i,'{0} x'.format(string)] = calcs['x']\r\n segs.at[i,'{0} y'.format(string)] = calcs['y']\r\n \r\n Z_offset = 0\r\n if string == 'base' and isinstance(calcSegs['notes'].loc[i], str): #If there is a note\r\n if 'from top' in calcSegs['notes'].loc[i]:\r\n Z_offset = float(RefRad)\r\n elif 'from bot' in calcSegs['notes'].loc[i]:\r\n Z_offset = -float(RefRad)\r\n\r\n segs.at[i,'base z'] = segs.loc[i,'base z'] + Z_offset\r\n \r\n if calcs['error'] == True:\r\n f.print2Log(logFileName,'Segment {0} reference assumed to be face to pith (reference to target)'.format(segs['name'].iloc[i]))\r\n error = calcs['error'] \r\n\r\n f.closingStatement(logFileName, error) \r\n return(segs)", "def build_dependencies(self):\n # First we find all the references and the exact location(s) in the config\n # that each reference ocurrs at\n self._find_references()\n # Next we determine if any of the things we refer to in the dependency\n # graph have backticks, meaning they must be evaluated before the\n # things that refer to them actually resolve their value\n for path in self.dep_graph.keys():\n key_seq = path.split('.')\n val = self.getfromseq(key_seq)\n if isinstance(val, str) and (val[0] == '`' and val[-1] == '`'):\n self.dep_graph[path].update({'evaluated': False})\n else:\n self.dep_graph[path].update({'evaluated': True})\n\n # Now we build out the \"refers_to\" entry for each reference to see if a\n # reference at one place in the table refers to some other value\n # For each reference we found\n for ref, data in self.dep_graph.items():\n # Loop through all the other references. 
If the above reference exists\n # in the \"ref_by\" table, we know the above reference refers to another\n # value and we need to resolve that value first. Note we also do this\n # for ref itself so we can catch circular references\n for other_ref, its_data in self.dep_graph.items():\n if ref in its_data['ref_by']:\n if other_ref == ref:\n raise ValueError('There is a circular reference in your'\n ' config file at %s' % ref)\n else:\n if 'ref_to' in data:\n data['ref_to'].append(other_ref)\n else:\n data['ref_to'] = [other_ref]\n # Nothing has been resolved at this poing\n data['resolved'] = False", "def _find_dofs(self):\n\n dofs = []\n for col in self.data.par_cols:\n if col not in self._axis_columns:\n if len(self.data.df[col].unique()) >= 2:\n dofs.append(col)\n self.log.debug(\"dofs = {}\".format(dofs))\n self._dofs = dofs", "def hail_ride_invalid(cls, gdf):\n check_name = \"hail_ride_invalid\"\n # list of stops not in correct admin areas by geo position.\n failed_nodes = ''\n rep.report_failing_nodes(gdf, check_name, failed_nodes)\n return failed_nodes", "def remove_redundant_paths(expand_paths, list_of_path, visited_stations_cost):\n\n\n for path_expanded in reversed(expand_paths):\n if path_expanded.last in visited_stations_cost:\n if visited_stations_cost[path_expanded.last] <= path_expanded.g :\n # expand_paths.remove(path_expanded) # NO ESTA CORRECTE AL 100% pero es lo mes optim que he trobat\n expand_paths.remove(path_expanded)\n else:\n visited_stations_cost[path_expanded.last] = path_expanded.g # NO ESTA CORRECTE AL 100% Pero es lo mes optim que he tobat\n for x in reversed(list_of_path):\n\n if path_expanded.last in x.route:\n # NO ESTA CORRECTE AL 100% Pero es lo mes optim que he tobat\n list_of_path.remove(x)\n else:\n visited_stations_cost[path_expanded.last] = path_expanded.g\n\n return expand_paths,list_of_path,visited_stations_cost\n\n pass", "def columns_used(self):\n return list(tz.unique(tz.concatv(\n util.columns_in_filters(self.fit_filters),\n util.columns_in_filters(self.predict_filters),\n util.columns_in_formula(self.default_model_expr),\n self._group.columns_used(),\n [self.segmentation_col])))", "def test_206_designate_bind_designate_relation(self):\n u.log.debug('Checking designate-bind:designate dns-backend relation'\n 'data...')\n unit = self.designate_bind_sentry\n relation = ['dns-backend', 'designate:dns-backend']\n expected = {\n 'private-address': u.valid_ip,\n 'rndckey': u.not_null,\n 'algorithm': 'hmac-md5',\n }\n\n ret = u.validate_relation_data(unit, relation, expected)\n if ret:\n message = u.relation_error('designate dns-backend', ret)\n amulet.raise_status(amulet.FAIL, msg=message)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for unexpected derivation path entries Invalid Wallet Type Wallet IDs not in table 'users_wallets' Wallet ID with different wallet_type
def check_unexpected_derivation_entries( self, wallets: List[Wallet], derivation_paths: List[DerivationPath] ) -> List[str]: errors = [] wallet_id_to_type = {w.id: w.wallet_type for w in wallets} invalid_wallet_types = [] missing_wallet_ids = [] wrong_type = defaultdict(list) for d in derivation_paths: if d.wallet_type not in set(wt.value for wt in WalletType): invalid_wallet_types.append(d.wallet_type) if d.wallet_id not in wallet_id_to_type: missing_wallet_ids.append(d.wallet_id) elif d.wallet_type != wallet_id_to_type[d.wallet_id]: wrong_type[(d.hardened, d.wallet_id, d.wallet_type, wallet_id_to_type[d.wallet_id])].append( d.derivation_index ) if len(invalid_wallet_types) > 0: errors.append(f"Invalid wallet_types in derivation_paths table: {invalid_wallet_types}") if len(missing_wallet_ids) > 0: errors.append( f"Wallet IDs found in derivation_paths table, but not in users_wallets table: {missing_wallet_ids}" ) for k, v in wrong_type.items(): errors.append( f"""{[" ", "un"][int(k[0])]}hardened Wallet ID {k[1]} uses type {wallet_type_name(k[2])} in """ f"derivation_paths, but type {wallet_type_name(k[3])} in wallet table at these derivation indices: {v}" ) return errors
[ "def validate_swap_path(whole_swap,swaps):\n\n balances = dict()\n\n src_token, dst_token, amount_in, amount_out, sender, receiver = whole_swap\n\n balances[src_token] = amount_in \n balances[dst_token] = - amount_out \n\n for src_token, dst_token, amount_in, amount_out, sender, receiver in swaps:\n\n if src_token not in balances:\n balances[src_token] = 0 \n if dst_token not in balances:\n balances[dst_token] = 0\n\n balances[src_token] = balances[src_token] - amount_in \n balances[dst_token] = balances[dst_token] + amount_out \n\n for key, value in balances.items():\n if value > 0:\n return False \n \n return True", "def test_invalid_change_add_debtor_address():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n del statement['baseDebtor']\n del statement['addDebtors'][0]['address']\n del statement['addDebtors'][0]['partyId']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_invalid_change_delete_vehicle_type():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n del statement['baseDebtor']\n del statement['deleteVehicleCollateral'][0]['type']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def checkWalletExistance(exist):\n def checkWallet(func):\n @wraps(func)\n def wrapper(*args,**kwargs):\n data=args[1].data;\n phone=data.get(\"phone\");\n found=getWallet(phone).get(\"exists\");\n if found==True and exist==False:\n raise WalletExist;\n elif found==False and exist==True:\n raise WalletNotExist;\n return func(*args,**kwargs);\n return wrapper;\n return checkWallet;", "def check_addresses_used_contiguous(derivation_paths: List[DerivationPath]) -> List[str]:\n errors: List[str] = []\n\n for wallet_id, dps in dp_by_wallet_id(derivation_paths).items():\n saw_unused = False\n bad_used_values: Set[int] = set()\n ordering_errors: List[str] = []\n # last_index = None\n # last_hardened = None\n for dp in dps:\n # _validate_args_addresses_used(wallet_id, last_index, last_hardened, dp)\n\n if saw_unused and dp.used == 1 and ordering_errors == []:\n ordering_errors.append(\n f\"Wallet {dp.wallet_id}: \"\n f\"Used address after unused address at derivation index {dp.derivation_index}\"\n )\n\n if dp.used == 1:\n pass\n elif dp.used == 0:\n saw_unused = True\n else:\n bad_used_values.add(dp.used)\n\n # last_hardened = dp.hardened\n # last_index = dp.derivation_index\n\n if len(bad_used_values) > 0:\n errors.append(f\"Wallet {wallet_id}: Bad values in 'used' column: {bad_used_values}\")\n if ordering_errors != []:\n errors.extend(ordering_errors)\n\n return errors", "def test_path3_1_0(self):\n self.assertTrue(self.paths_between_all_users(\"S10:2\", {'ownership': \"B\"}))", "def test_path4_3(self):\n self.assertTrue(self.paths_between_all_users(\"S5:3\", {'reliability': 3}))", "def test_invalid_change_missing_changetype():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n del statement['baseDebtor']\n del statement['changeType']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_invalid_zip_code(self):\n\n invalid_zip_codes_to_test = [\"48066! 
\", \"Michigan\", \"4806689\", \"!48066%#$\", \"Roseville\", \"480366412\", \"41124112\", \"!@48021\"]\n \n for zip_code in invalid_zip_codes_to_test:\n self.database.zip_code = zip_code\n self.assertFalse(self.database.validate_zipCode())", "def _warn_if_invalid_testnet_wallet(self):\n is_old_bad = self._is_invalid_testnet_wallet()\n if is_old_bad:\n msg = \" \".join(\n [\n _(\"This testnet wallet has an invalid master key format.\"),\n _(\n f\"(Old versions of {PROJECT_NAME} before 3.3.6 produced invalid\"\n \" testnet wallets).\"\n ),\n \"<br><br>\",\n _(\n \"In order to use this wallet without errors with this version\"\n \" of EC, please <b>re-generate this wallet from seed</b>.\"\n ),\n \"<br><br><em><i>~SPV stopped~</i></em>\",\n ]\n )\n self.show_critical(msg, title=_(\"Invalid Master Key\"), rich_text=True)\n return is_old_bad", "def test_invalid_change_missing_regparty_address():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n del statement['baseDebtor']\n del statement['registeringParty']['address']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_invalid_change_missing_basereg():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n del statement['baseDebtor']\n del statement['baseRegistrationNumber']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_invalid_change_changetype():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n del statement['baseDebtor']\n statement['changeType'] = 'XX'\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_invalid_change_delete_debtor_name():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n del statement['baseDebtor']\n del statement['deleteDebtors'][0]['businessName']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_invalid_change_PD_missing_collateral():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n statement['changeType'] = 'PD'\n del statement['createDateTime']\n del statement['changeRegistrationNumber']\n del statement['payment']\n del statement['deleteVehicleCollateral']\n del statement['deleteGeneralCollateral']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_invalid_change_SU_missing_delete():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n statement['changeType'] = 'SU'\n del statement['createDateTime']\n del statement['changeRegistrationNumber']\n del statement['payment']\n del statement['deleteGeneralCollateral']\n del statement['deleteVehicleCollateral']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_invalid_derivation_method(self):\n payload = payloads.DeriveKeyRequestPayload()\n args = (payload, \"derivation_method\", \"invalid\")\n self.assertRaisesRegex(\n TypeError,\n \"Derivation method must be a DerivationMethod enumeration.\",\n setattr,\n *args\n )", "def test_path3_4(self):\n self.assertTrue(self.paths_between_all_users(\"User1:2\", {'ownership': \"B\"}))", "def 
test_invalid_change_DT_missing_delete():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n statement['changeType'] = 'DT'\n del statement['createDateTime']\n del statement['changeRegistrationNumber']\n del statement['payment']\n del statement['deleteDebtors']\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare a GPTJConfig with a finetuneanon GPTNeoConfig config and ensure they match. Required if loading a pretrained model
def finetuneanon_lm_config_check(config: GPTJConfig, finetuneanon_config: GPTNeoConfig): if finetuneanon_config.jax == False: raise ValueError( "GPTNeo model in https://github.com/finetuneanon/transformers is equivalent to gptj only with jax=True" ) if finetuneanon_config.rotary == False: raise ValueError( "GPTNeo model in https://github.com/finetuneanon/transformers is equivalent to gptj only if rotary embedding is used" ) for attn in finetuneanon_config.attention_layers: if attn != "global": raise ValueError( 'GPTNeo model in https://github.com/finetuneanon/transformers is equivalent to gptj only if "global" attention is used' ) attn_type = finetuneanon_config.attention_types[0][0] if attn_type != "global": raise ValueError( 'GPTNeo model in https://github.com/finetuneanon/transformers is equivalent to gptj only if "global" attention is used' ) params = [ ("hidden_size", config.hidden_size, finetuneanon_config.hidden_size), ("heads", config.attention.heads, finetuneanon_config.num_heads), ("layers", config.layers, finetuneanon_config.num_layers), ("vocab_size", config.embedding.real_vocab_size, finetuneanon_config.vocab_size), ("rotary_dim", config.attention.rotary_dim, finetuneanon_config.rotary_dim), ] if not all(xl == hf for _, xl, hf in params): not_eq_str = ", ".join(f"\n`{name}` not equal, config: {xl}, hf: {hf}" for name, xl, hf in params if xl != hf) raise ValueError( f"Config does not match the GPTNeo pre-trained model from https://github.com/finetuneanon/transformers. Not matching: {not_eq_str}" )
[ "def check_nn_config(f_config):\n\n if f_config[\"model_type\"] in [\"nrms\", \"NRMS\"]:\n required_parameters = [\n \"doc_size\",\n \"his_size\",\n \"user_num\",\n \"wordEmb_file\",\n \"word_size\",\n \"npratio\",\n \"data_format\",\n \"word_emb_dim\",\n # nrms\n \"head_num\",\n \"head_dim\",\n # attention\n \"attention_hidden_dim\",\n \"loss\",\n \"data_format\",\n \"dropout\",\n ]\n\n elif f_config[\"model_type\"] in [\"naml\", \"NAML\"]:\n required_parameters = [\n \"title_size\",\n \"body_size\",\n \"his_size\",\n \"user_num\",\n \"vert_num\",\n \"subvert_num\",\n \"wordEmb_file\",\n \"word_size\",\n \"npratio\",\n \"data_format\",\n \"word_emb_dim\",\n \"vert_emb_dim\",\n \"subvert_emb_dim\",\n # naml\n \"filter_num\",\n \"cnn_activation\",\n \"window_size\",\n \"dense_activation\",\n # attention\n \"attention_hidden_dim\",\n \"loss\",\n \"data_format\",\n \"dropout\",\n ]\n elif f_config[\"model_type\"] in [\"lstur\", \"LSTUR\"]:\n required_parameters = [\n \"doc_size\",\n \"his_size\",\n \"user_num\",\n \"wordEmb_file\",\n \"word_size\",\n \"npratio\",\n \"data_format\",\n \"word_emb_dim\",\n # lstur\n \"gru_unit\",\n \"type\",\n \"filter_num\",\n \"cnn_activation\",\n \"window_size\",\n # attention\n \"attention_hidden_dim\",\n \"loss\",\n \"data_format\",\n \"dropout\",\n ]\n elif f_config[\"model_type\"] in [\"npa\", \"NPA\"]:\n required_parameters = [\n \"doc_size\",\n \"his_size\",\n \"user_num\",\n \"wordEmb_file\",\n \"word_size\",\n \"npratio\",\n \"data_format\",\n \"word_emb_dim\",\n # npa\n \"user_emb_dim\",\n \"filter_num\",\n \"cnn_activation\",\n \"window_size\",\n # attention\n \"attention_hidden_dim\",\n \"loss\",\n \"data_format\",\n \"dropout\",\n ]\n else:\n required_parameters = []\n\n # check required parameters\n for param in required_parameters:\n if param not in f_config:\n raise ValueError(\"Parameters {0} must be set\".format(param))\n\n if f_config[\"model_type\"] in [\"nrms\", \"NRMS\", \"lstur\", \"LSTUR\"]:\n if f_config[\"data_format\"] != \"news\":\n raise ValueError(\n \"For nrms and naml model, data format must be 'news', but your set is {0}\".format(\n f_config[\"data_format\"]\n )\n )\n elif f_config[\"model_type\"] in [\"naml\", \"NAML\"]:\n if f_config[\"data_format\"] != \"naml\":\n raise ValueError(\n \"For nrms and naml model, data format must be 'naml', but your set is {0}\".format(\n f_config[\"data_format\"]\n )\n )\n\n check_type(f_config)", "def config_compare(self, config1, config2):\n return self.result_compare(\n self.driver.results_query(config=config1, objective_ordered=True)[0],\n self.driver.results_query(config=config2, objective_ordered=True)[0])", "def check_triton_and_model_analyzer_gpus(config):\n\n model_analyzer_gpus = get_analyzer_gpus(config)\n triton_gpus = get_triton_metrics_gpus(config)\n if set(model_analyzer_gpus) != set(triton_gpus):\n raise TritonModelAnalyzerException(\n \"'Triton Server is not using the same GPUs as Model Analyzer: '\"\n f\"Model Analyzer GPUs {model_analyzer_gpus}, Triton GPUs {triton_gpus}\"\n )", "def _CheckTPUEmbeddingConfig(\n self,\n tpu_embedding: tpu_embedding_lib.TPUEmbedding,\n table_to_config_dict: Mapping[str, tpu_embedding_lib.TableConfig],\n feature_to_config_dict: Mapping[str, tpu_embedding_lib.FeatureConfig],\n global_batch_size: int,\n ):\n\n def _Match(d1, d2, namedtuple_attrs_to_check):\n if len(d1) != len(d2):\n return False\n for k, v1 in d1.items():\n if k not in d2:\n return False\n v2 = d2[k]\n for attr in namedtuple_attrs_to_check:\n if getattr(v1, attr) != getattr(v2, 
attr):\n return False\n return True\n\n # We just check numeric/string settings for simplicity, this excludes things\n # like learning_rate_fn, optimization_parameters, etc since it's hard to\n # compare them.\n if not _Match(\n tpu_embedding.table_to_config_dict,\n table_to_config_dict,\n ['vocabulary_size', 'dimension', 'combiner'],\n ):\n raise ValueError(\n 'table_to_config_dict mismatch. '\n f'Expecting {tpu_embedding.table_to_config_dict}, '\n f'got {table_to_config_dict}'\n )\n if not _Match(\n tpu_embedding.feature_to_config_dict,\n feature_to_config_dict,\n ['table_id', 'max_sequence_length'],\n ):\n raise ValueError(\n 'feature_to_config_dict mismatch. '\n f'Expecting {tpu_embedding.feature_to_config_dict}, '\n f'got {feature_to_config_dict}'\n )\n if (\n tpu_embedding.batch_size_per_core * tpu_embedding.num_cores\n != global_batch_size\n ):\n raise ValueError(\n 'global_batch_size mismatch. '\n f'batch_size_per_core: {tpu_embedding.batch_size_per_core}, '\n f'num_cores: {tpu_embedding.num_cores}, '\n f'global_batch_size: {global_batch_size}'\n )", "def config_compare(self, config1, config2):\n return self.result_compare(self.driver.results_query(config=config1).one(),\n self.driver.results_query(config=config2).one())", "def check(config1, config2):\n return config1 == config2", "def check_config(config_dict: Dict[str, Any]) -> None:\n param_keys = [\"strength\", \"demo_path\", \"steps\"]\n for k in param_keys:\n if k not in config_dict:\n raise UnityTrainerException(\n \"The required pre-training hyper-parameter {0} was not defined. Please check your \\\n trainer YAML file.\".format(\n k\n )\n )", "def get_model_setting(finetune_config, model_config):\n cfg = finetune_config\n gpt2_net_cfg = model_config\n\n print(\"Loading GPT2 Model Config setting......\")\n print(\" | model size: {}\".format(cfg.gpt2_network))\n print(\" | batch_size: {}\".format(gpt2_net_cfg.batch_size))\n print(\" | seq_length: {}\".format(gpt2_net_cfg.seq_length))\n print(\" | vocab_size: {}\".format(gpt2_net_cfg.vocab_size))\n print(\" | d_model: {}\".format(gpt2_net_cfg.d_model))\n print(\" | num_hidden_layers: {}\".format(gpt2_net_cfg.num_hidden_layers))\n print(\" | num_attention_heads: {}\".format(gpt2_net_cfg.num_attention_heads))\n print(\" | hidden_dropout: {}\".format(gpt2_net_cfg.hidden_dropout))\n print(\" | attention_dropout: {}\".format(gpt2_net_cfg.attention_dropout))\n print(\" | summary_first_dropout: {}\\n\".format(gpt2_net_cfg.summary_first_dropout))", "def test_config():\n check_model_exist()\n test_suite = InferenceTest()\n test_suite.load_config(model_path=\"./resnet50_quant/resnet50_quant\")\n test_suite.config_test()", "def _checkModelConfig(self):\n if (self.modelConfig.__eq__('')):\n print('Debe cargar primero el archivo de configuración')\n self.statusBar().showMessage('Debe cargar primero el archivo de configuración')\n return False\n else:\n return True #true porque no esta vacio", "def data_and_simulation_are_consistent(config: OptimizerConfig) -> bool: # pragma: no cover\n\n data_var_names = sorted(config.data.inputs.keys())\n dataset_ok = True\n missing_names: Set[str] = set()\n data_output_name: Optional[str] = None\n if config.data.folder.is_dir() or config.data.files:\n df = config.data.load_dataframe()\n df_col_names = sorted(df.columns.tolist())\n data_output_name = config.data.output_column\n missing_names = set(data_var_names).union([data_output_name]).difference(df_col_names)\n if missing_names:\n logging.error(\n \"One or more columns expected by the config file are 
missing from the data: \"\n + \", \".join(sorted(missing_names))\n )\n try:\n Dataset(df, config.data)\n except ValueError as e:\n logging.error(f\"Constructing Dataset object raised a ValueError: {e}\")\n dataset_ok = False\n simulator = config.get_simulator()\n data_generator = SimulatedDataGenerator(simulator)\n simulation_var_names = sorted(data_generator.parameter_space.parameter_names)\n input_names_consistent = data_var_names == simulation_var_names\n if not input_names_consistent: # pragma: no cover\n logging.error(\"Inputs in the config file must match those of the data generator (simulator)\")\n logging.error(f\"Inputs in the config: {', '.join(data_var_names)}\")\n logging.error(f\"Inputs allowed by data generator: {', '.join(simulation_var_names)}\")\n simulation_output_name = data_generator.objective_col_name\n output_names_consistent = (data_output_name == simulation_output_name) or data_output_name is None\n if not output_names_consistent:\n logging.error(\"Output in the config file must match objective of the data generator (simulator)\")\n logging.error(f\"Output in the config: {data_output_name}\")\n logging.error(f\"Objective of the data generator: {simulation_output_name}\")\n return input_names_consistent and output_names_consistent and not missing_names and dataset_ok", "def test_build_config_json_inequality():\n config_a = BuildConfig(cmake_defs={\"A\": \"B\"})\n config_b = BuildConfig(kconfig_defs={\"CONFIG_A\": \"y\"})\n\n assert config_a.as_json() != config_b.as_json()", "def test_equal_true(self):\n config1 = Config({'foo': {'bar': 'baz'}})\n config2 = Config({'foo': {'bar': 'baz'}})\n self.assertTrue(config1 == config2)", "def initialize_config(self):\n\n def _logic(utterance: str) -> bool:\n \"\"\"\n Logic to be used by the logic-micromodel.\n \"\"\"\n return \"test\" in utterance.lower()\n\n configs = [\n {\n \"model_type\": \"svm\",\n \"name\": \"test_svm\",\n \"model_path\": os.path.join(self.model_path, \"test_svm\"),\n \"setup_args\": {\n \"training_data_path\": os.path.join(\n self.data_path, \"dog_vs_cat.json\"\n ),\n },\n },\n {\n \"model_type\": \"logic\",\n \"name\": \"test_logic\",\n \"model_path\": os.path.join(self.model_path, \"test_logic\"),\n \"setup_args\": {\"logic_func\": _logic},\n },\n {\n \"model_type\": \"bert_query\",\n \"name\": \"test_bert_query\",\n \"model_path\": os.path.join(self.model_path, \"test_bert_query\"),\n \"setup_args\": {\n \"threshold\": 0.8,\n \"seed\": [\n \"This is a test\",\n \"Arya is a hungry cat.\",\n ],\n \"infer_config\": {\n \"k\": 2,\n \"segment_config\": {\"window_size\": 5, \"step_size\": 3},\n },\n },\n },\n ]\n return configs", "def update_booltest_config(cfg_file_jsobj, booltest_config_jsobj):\n RT_KEY = 'randomness-testing-toolkit'\n BOOL_KEY = 'booltest'\n if RT_KEY not in cfg_file_jsobj:\n cfg_file_jsobj[RT_KEY] = {}\n if BOOL_KEY not in cfg_file_jsobj[RT_KEY]:\n cfg_file_jsobj[RT_KEY][BOOL_KEY] = {}\n\n for k in booltest_config_jsobj:\n cfg_file_jsobj[RT_KEY][BOOL_KEY][k] = booltest_config_jsobj[k]\n return cfg_file_jsobj", "def _same_configuration(self, sm2):\n return \\\n set([type(s) for s in self._stats]) == set([type(s) for s in sm2._stats]) and \\\n set([type(s) for s in self._derived_stats]) == set([type(s) for s in sm2._derived_stats]) and \\\n set([type(s) for s in self._special_stats]) == set([type(s) for s in sm2._special_stats])", "def check_config(config):\n parameters = [\n \"output_dir\",\n \"timestamp\",\n \"name\",\n \"bbox\",\n \"epsg\",\n \"cloud_coverage\",\n \"ndvi_year\",\n 
\"output_dir\",\n ]\n for par in parameters:\n assert par in config.keys(), f\"Parameter '{par}' missing in config file.\"", "def test_get_configs_from_multiple_files(self):\n temp_dir = self.get_temp_dir()\n\n # Write model config file.\n model_config_path = os.path.join(temp_dir, \"model.config\")\n model = model_pb2.DetectionModel()\n model.faster_rcnn.num_classes = 10\n _write_config(model, model_config_path)\n\n # Write train config file.\n train_config_path = os.path.join(temp_dir, \"train.config\")\n train_config = train_config = train_pb2.TrainConfig()\n train_config.batch_size = 32\n _write_config(train_config, train_config_path)\n\n # Write train input config file.\n train_input_config_path = os.path.join(temp_dir, \"train_input.config\")\n train_input_config = input_reader_pb2.InputReader()\n train_input_config.label_map_path = \"path/to/label_map\"\n _write_config(train_input_config, train_input_config_path)\n\n # Write eval config file.\n eval_config_path = os.path.join(temp_dir, \"eval.config\")\n eval_config = eval_pb2.EvalConfig()\n eval_config.num_examples = 20\n _write_config(eval_config, eval_config_path)\n\n # Write eval input config file.\n eval_input_config_path = os.path.join(temp_dir, \"eval_input.config\")\n eval_input_config = input_reader_pb2.InputReader()\n eval_input_config.label_map_path = \"path/to/another/label_map\"\n _write_config(eval_input_config, eval_input_config_path)\n\n configs = config_util.get_configs_from_multiple_files(\n model_config_path=model_config_path,\n train_config_path=train_config_path,\n train_input_config_path=train_input_config_path,\n eval_config_path=eval_config_path,\n eval_input_config_path=eval_input_config_path)\n self.assertProtoEquals(model, configs[\"model\"])\n self.assertProtoEquals(train_config, configs[\"train_config\"])\n self.assertProtoEquals(train_input_config,\n configs[\"train_input_config\"])\n self.assertProtoEquals(eval_config, configs[\"eval_config\"])\n self.assertProtoEquals(eval_input_config, configs[\"eval_input_configs\"][0])", "def test_two_sources(self):\n config = config_parser()\n self._setup_comp1(config)\n self._setup_comp2(config)\n self._setup_externals_description(config)\n model = ExternalsDescriptionConfigV1(config)\n print(model)\n self._check_comp1(model)\n self._check_comp2(model)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enumerate over an iterable in reverse order while retaining proper indexes
def reverse_enumerate(iterable):
    # Lifted from http://galvanist.com/post/53478841501/python-reverse-enumerate
    return itertools.izip(reversed(xrange(len(iterable))), reversed(iterable))
    # Alternative python3 version:
    # return zip(reversed(range(len(iterable))), reversed(iterable))
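A minimal usage sketch of the helper above, assuming the Python 3 variant noted in its comment (zip/range in place of izip/xrange); the name reverse_enumerate3 and the sample values are illustrative only:

def reverse_enumerate3(iterable):
    # Python 3 form of the same helper: pair reversed indices with reversed items.
    return zip(reversed(range(len(iterable))), reversed(iterable))

for i, item in reverse_enumerate3(["a", "b", "c"]):
    print(i, item)  # prints 2 c, then 1 b, then 0 a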
[ "def reverse_enumerate(iterable):\n return izip(reversed(range(len(iterable))), reversed(iterable))", "def reversed_enumerate(seq):\r\n return izip(reversed(xrange(len(seq))), reversed(seq))", "def rev_enumerate(seq):\n cnt = 0\n seq = reverse(seq)\n for i in seq:\n yield len(seq)-cnt-1, i\n cnt += 1", "def reversedEnumerate(l):\n return zip(range(len(l)-1, -1, -1), l[::-1])", "def reverse(iterator):\n for i in iterator:\n yield from reverse(iterator)\n yield i", "def reviter(self):\n for b in self.blocks[::-1]:\n yield b", "def test_reversed_enumeration(self):\n test_list = range(10)\n expected = [\n (0, 9), (-1, 8), (-2, 7), (-3, 6), (-4, 5),\n (-5, 4), (-6, 3), (-7, 2), (-8, 1), (-9, 0)\n ]\n result = [l for l in reverse_enumerate(test_list)]\n self.assertEquals(expected, result)", "def __reversed__(self):\n current=self.last()\n while current is not None:\n yield current.element()\n current=self.before(current)", "def test_reversed_enumeration_option_params(self):\n test_list = range(10)\n expected = [\n (9, 9), (8, 8), (7, 7), (6, 6), (5, 5),\n (4, 4), (3, 3), (2, 2), (1, 1), (0, 0)\n ]\n result = [l for l in reverse_enumerate(test_list, 9)]\n self.assertEquals(expected, result)", "def elements_reversed(seq):\n return seq[::-1]", "def __reversed__(self):\n for child in reversed(self.child_list):\n yield child", "def _do_reverse_IterRankDifferentiaZip(\n self: \"HereditaryStratumOrderedStoreTree\",\n # deposition ranks might not be stored in strata\n get_rank_at_column_index: typing.Optional[typing.Callable] = None,\n start_column_index: int = 0,\n ) -> typing.Iterator[typing.Tuple[int, int]]:\n for reverse_column_idx, node in enumerate(self._GetAscendingIter()):\n column_idx = self.GetNumStrataRetained() - 1 - reverse_column_idx\n if column_idx >= start_column_index:\n rank: int\n if get_rank_at_column_index is None:\n rank = node.stratum.GetDepositionRank()\n assert rank is not None\n else:\n rank = get_rank_at_column_index(column_idx)\n assert rank is not None\n yield (rank, node.stratum.GetDifferentia())\n else:\n break", "def __reversed__(self):\n if self.trajectory is not None:\n return reversed(self.trajectory)\n else:\n return [] # empty iterator", "def traceReverse(self, r):\n if self.skip:\n return r\n for item in reversed(self.items):\n if not item.skip:\n r = item.traceReverse(r)\n return r", "def revmembers(self):\r\n return self.zrevrange(0, -1)", "def _reverse(self):\n o = self.copy()\n # Clear ok reversed flag\n o._reversed = not o._reversed\n\n if o.bits == 8:\n # No need for reversing\n return o.copy()\n\n if o.is_top:\n # A TOP is still a TOP after reversing\n si = o.copy()\n return si\n\n else:\n if not o.is_integer:\n # We really don't want to do that... but well, sometimes it just happens...\n logger.warning(\"Reversing a real strided-interval %s is bad\", self)\n\n # Reversing an integer is easy\n rounded_bits = ((o.bits + 7) // 8) * 8\n list_bytes = []\n si = None\n\n for i in range(0, rounded_bits, 8):\n b = o._unrev_extract(min(i + 7, o.bits - 1), i)\n list_bytes.append(b)\n\n for b in list_bytes:\n si = b if si is None else si.concat(b)\n si.uninitialized = self.uninitialized\n si._reversed = o._reversed\n return si", "def deep_reverse(L):\n L.reverse()\n for i in L:\n i.reverse()", "def _fragment_in_reverse(iterable, start=0, limit=None):\n # TODO: Naive implementation. Needs to be rewritten to a solution with\n # file pointer moving around etc. 
if it turns out that this isn't\n # performant enough.\n maxlen = None\n if limit is not None:\n maxlen = start + limit\n\n fragment = collections.deque(iterable, maxlen)\n try:\n for _ in range(start):\n fragment.pop()\n except IndexError:\n raise exc.InvalidInputError(__name__,\n 'Index start=%s is out of range.'\n % str(start))\n\n fragment.reverse()\n return fragment", "def __reversed__(self):\n return reversed(self.outcomes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an object to do various processing with a reddit thread (rendering to different formats). thread_id is the optional id of the reddit submission to squash (check the URL). If thread_id is not None, the thread will be remotely fetched and parsed from reddit (this can easily take an hour if the number of comments exceeds a few thousand). json is an optional cached/preparsed version of the thread, equivalent to initializing with a thread_id and then saving self.json to a file.
def __init__(self, thread_id=None, json=None, author_map=None, characters=None):
    self._thread = None
    self.thread_id = thread_id
    self.comment_data = None
    self.author_map = author_map or {}
    self.characters = characters or {}
    self.commentlist = []
    # Create a handle for accessing reddit, and load the thread
    self.reddit = praw.Reddit(user_agent='github.com/wallacoloo/reddit-roleplay-assembler')
    if json is not None:
        self.comment_data = globals()["json"].loads(json)
    if self.comment_data is None and thread_id is not None:
        # Many functions recurse through the comment chain, so set a high recursion limit
        sys.setrecursionlimit(5*self.thread.num_comments+1000)
        # Expand all comments (this will take some time!)
        self.thread.comments.replace_more(limit=None, threshold=0)
        #note it's probably a good idea to loop and handle exceptions, they say...
        # Remove all but the main thread of comments
        max_depth = self.max_comment_depth()
        self.filter_comments_by_max_depth(max_depth, self.thread.comments)
        # There may still be comment forks near the end that have the same length
        # We need to drop everything after the fork, as we don't know which of the choices is the main discussion
        print 'got ', len(self.commentlist)
        self.comment_data = self.comments_to_dicts(self.commentlist)
        print 'dicts: ', len(self.comment_data)
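A hypothetical caching workflow built on this constructor and the get_json method shown further below; the class name ThreadSquasher and the thread id are assumptions, since the enclosing class is not named here:

# First run: fetch and expand the thread remotely (slow), then cache the parsed comments.
squasher = ThreadSquasher(thread_id="abc123")   # "abc123" is a placeholder submission id
with open("thread_cache.json", "w") as f:
    f.write(squasher.get_json())

# Later runs: skip the remote fetch by handing the cached JSON back to the constructor.
with open("thread_cache.json") as f:
    cached = ThreadSquasher(json=f.read())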
[ "def parse_json(board, json_):\n if 'posts' not in json_ or not json_['posts']:\n raise ValueError('Thread does not contain any posts')\n\n first = json_['posts'][0]\n\n return Thread(board,\n first['no'],\n util.unescape_html(first['sub'])\n if 'sub' in first else None,\n Thread._find_subject(first),\n first['semantic_url'],\n [Post.parse_json(board, post)\n for post in json_['posts']])", "def thread_to_json(thread):\n authors = thread.get_authors()\n subject = thread.get_subject()\n return {\n \"authors\": authors if authors else \"\",\n \"matched_messages\": thread.get_matched_messages(),\n \"newest_date\": thread.get_newest_date(),\n \"oldest_date\": thread.get_oldest_date(),\n \"subject\": subject if subject else \"\",\n \"tags\": list(thread.get_tags()),\n \"thread_id\": thread.get_thread_id(),\n \"total_messages\": thread.get_total_messages(),\n }", "def crawl(thread_url):\n\tbase_url = \"https://np.reddit.com\"\n\tcomment_container = list()\n\treq = request.Request(base_url+thread_url, \n \tdata=None, \n \theaders={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n \t})\n\t\n\tcontent = request.urlopen(req).read()\n\tthread_name = thread_url.split(\"/\")[-2]+\".html\"\n\n\t# Saving as html\n\twith open(thread_name,\"w\") as txt:\t\n\t\ttxt.writelines(str(content))\n\n\t# Opening the html from disk\n\twith open(thread_name) as html:\n\t\tsoup = BeautifulSoup(html, \"html.parser\")\n\t\ts = soup.find_all(\"div\", {\"class\",\"content\"})\n\t\tif s:\n\t\t\ts = s[0].find_all(\"div\", id=lambda x: x and x.startswith('thing_t1_'))\n\t\t\tfor _s in s:\n\t\t\t\t# Getting the user that has posted the comment\n\t\t\t\tuser = _s[\"data-author\"]\n\t\t\t\t\n\t\t\t\t# Getting the text of the comment\n\t\t\t\ttext = _s.find(\"div\", {\"class\":\"md\"}).text\n\t\t\t\t# Need to do replacements to get the correct output\n\t\t\t\ttext = text.replace(\"\\\\xc3\\\\xa5\",\"å\").replace(\"\\\\xc3\\\\xb8\",\"ø\").replace(\"\\\\xc3\\\\xa6\",\"æ\")\n\t\t\t\t\n\t\t\t\t# Datetime for comment\t\t\t\n\t\t\t\ttime = _s.find(\"time\", {\"class\":\"live-timestamp\"})\n\t\t\t\ttime = time[\"datetime\"]\n\n\t\t\t\t# Link to comment\n\t\t\t\tlink = base_url+_s[\"data-permalink\"]\n\n\t\t\t\tcomment_container.append(Comment(user,text,time,link))\n\n\treturn comment_container", "def get_comment_thread_by_id(\n self,\n *,\n comment_thread_id: Union[str, list, tuple, set],\n parts: Optional[Union[str, list, tuple, set]] = None,\n text_format: Optional[str] = \"html\",\n return_json: Optional[bool] = False,\n ):\n\n args = {\n \"id\": enf_comma_separated(\"comment_thread_id\", comment_thread_id),\n \"part\": enf_parts(resource=\"commentThreads\", value=parts),\n \"textFormat\": text_format,\n }\n\n resp = self._request(resource=\"commentThreads\", method=\"GET\", args=args)\n data = self._parse_response(resp)\n\n if return_json:\n return data\n else:\n return CommentThreadListResponse.from_dict(data)", "def from_url(url, session=None):\n # extract the board and thread ids\n result = re.search(r'boards\\.4chan\\.org/([a-z]+)/thread/([0-9]+)', url)\n if not result:\n raise ValueError('Invalid thread URL: {0}'.format(url))\n\n # construct a session if necessary\n if not session:\n session = util.create_session()\n\n # determine the URL\n api_url = 'https://a.4cdn.org/{0}/thread/{1}.json'.format(\n result.group(1), result.group(2))\n\n # download the JSON\n logger.debug('Retrieving JSON from %s', api_url)\n response = 
session.get(api_url)\n if response.status_code != requests.codes.ok:\n raise IOError('Request to 4chan failed with status code {0}'.format(\n response.status_code))\n try:\n return Thread.parse_json(result.group(1), response.json())\n except ValueError as e:\n raise IOError('Error parsing 4chan response: {0}'.format(e))", "def __addThreadAndPosts(self):\n self.__genre = \"Review\"\n self.__hierarchy = []\n self.__task_elements_dict = {\n 'priority':self.task.priority,\n 'level': self.task.level,\n 'last_updated_time':datetime.strftime(datetime.utcnow()\n , \"%Y-%m-%dT%H:%M:%SZ\"), \n 'pickup_date':datetime.strftime(datetime.utcnow(), \n \"%Y-%m-%dT%H:%M:%SZ\"), \n 'connector_instance_log_id': \\\n self.task.connector_instance_log_id, \n 'connector_instance_id':\n self.task.connector_instance_id, \n 'workspace_id':self.task.workspace_id, \n 'client_id':self.task.client_id, \n 'client_name':self.task.client_name, \n 'versioned':False, \n 'category':self.task.instance_data.get('category',''), \n 'task_log_id':self.task.id }\n self.__setParentPage()\n question_post = self.soup.find('div', id=re.compile('^edit.*?'))\n self.__addPost(question_post, True)\n self.__goToLastPage()\n while self.__iteratePosts():\n try:\n next_page_uri = self.soup.find('a', text='&lt;',rel='prev').parent['href']\n data_dict = dict(parse_qsl(next_page_uri.split('?')[-1]))\n if 's' in data_dict.keys():\n data_dict.pop('s')\n self.currenturi = self.__baseuri + 'showthread.php?'+ urlencode(data_dict) \n self.__setSoupForCurrentUri()\n except:\n log.exception(self.log_msg('Next Page link not found for url \\\n %s'%self.currenturi))\n break\n return True", "def parse(cls, data, reddit):\n if data['author']:\n data['author'] = Redditor(reddit, data['author'])\n\n if data['dest'].startswith('#'):\n data['dest'] = Subreddit(reddit, data['dest'][1:])\n else:\n data['dest'] = Redditor(reddit, data['dest'])\n\n if data['replies']:\n replies = data['replies']\n data['replies'] = reddit._objector.objectify(\n replies['data']['children'])\n else:\n data['replies'] = []\n\n if data['subreddit']:\n data['subreddit'] = Subreddit(reddit, data['subreddit'])\n return SubredditMessage(reddit, _data=data)\n\n return cls(reddit, _data=data)", "def create(cls, user_id, thread_id):\n\n instance_id = cls._generate_id(user_id, thread_id)\n if cls.get_by_id(instance_id):\n raise Exception('Unique reply-to ID for given user and thread'\n ' already exists.')\n\n reply_to_id = cls._generate_unique_reply_to_id()\n return cls(id=instance_id, reply_to_id=reply_to_id)", "def parse_thread_level_items(page_one_html, board_id, thread_id):\n # Values we were given can be dropped in as-is.\n thread['board_id'] = board_id\n thread['thread_id'] = thread_id\n\n # Get the thread title\n thread_title_path = 'h2 > a'\n thread_title_element = d(thread_title_path)\n assert(thread_title_element)\n thread_title = thread_title_element.text()\n thread['title'] = thread_title\n\n\n\n # Check if locked\n if ('title=\"This topic is locked, you cannot edit posts or make further replies.\"' in page_html):\n thread['locked'] = True\n else:\n thread['locked'] = False\n\n return thread", "def get_random_post(self, board=None):\n #choose a random board if you didn't specify any\n if board is None:\n board = random.choice(self.config[\"boards\"])\n\n data = []\n \"this code gets all the threads in the specified board\"\n with urllib.request.urlopen(\"http://a.{}/{}/threads.json\".format(self.config[\"chanDomain\"], board)) as page:\n data = 
json.loads(page.read().decode(\"utf-8\"))\n\n post_numbers = []\n for page in data:\n for thread in page[\"threads\"]:\n post_numbers.append(thread[\"no\"])\n\n #chooses a random thread\n no = random.choice(post_numbers)\n with urllib.request.urlopen(\"http://a.{}/{}/thread/{}.json\".format(self.config[\"chanDomain\"], board, no)) as page:\n data = json.loads(page.read().decode(\"utf-8\"))\n\n #gets a random post from the random thread\n chosen_one = []\n\n #A post without an image is boring\n while len(data[\"posts\"]) > 0:\n chosen_one = random.choice(data[\"posts\"])\n data[\"posts\"].remove(chosen_one)\n if \"filename\" in chosen_one:\n break\n\n com = \"\"\n if \"com\" in chosen_one:\n com = chosen_one[\"com\"]\n\n #downloads the image in memory\n self.logger.debug(\"PostSifter: {}\".format(chosen_one))\n self.logger.info(\"PostSifter: http://i.{}/{}/{}{}\".format(self.config[\"chanDomain\"], board, chosen_one[\"tim\"], chosen_one[\"ext\"]))\n\n with urllib.request.urlopen(\"http://i.{}/{}/{}{}\".format(self.config[\"chanDomain\"], board, chosen_one[\"tim\"], chosen_one[\"ext\"])) as page:\n return Post(page.read(), com)", "def post_message_wall_thread(userid, title, json_model, wikia_php, api_php,\n session=None):\n try:\n new_wikia_php = requests.models.PreparedRequest()\n new_wikia_php.prepare_url(wikia_php, {\n \"controller\": \"Fandom\\\\MessageWall\\\\MessageWall\",\n \"method\": \"createThread\",\n \"format\": \"json\",\n })\n\n request = (session or requests.Session()).post(new_wikia_php.url, data={\n \"title\": title,\n \"wallOwnerId\": userid,\n \"token\": get_csrf_token(api_php, session),\n # \"rawcontent\": \"\",\n \"jsonModel\": json_model,\n \"attachments\":\n \"{\\\"contentImages\\\":[],\\\"openGraphs\\\":[],\\\"atMentions\\\":[]}\"\n })\n request.raise_for_status()\n\n data = request.json()\n except (requests.exceptions.HTTPError, json.decoder.JSONDecodeError):\n raise QueryException()\n\n try:\n # No clean way to determine if operation was successful\n return \"id\" in data and int(data[\"createdBy\"][\"id\"]) == userid\n except (KeyError, ValueError):\n # Missing success-condition key/value pairs indicate input was faulty\n raise InputException()", "def _reconstruct_thread(comment, parents):\n id = comment['_id']\n thread = {\n 'id': id,\n 'user_id': comment['user_id'],\n 'children': []\n }\n children = parents[id]\n for reply in sorted(children, key=lambda c: c['date_created']):\n thread['children'].append(_reconstruct_thread(reply, parents))\n return thread", "def assemble(options, output_file):\n\n n_threads = 0\n n_posts = 0\n\n with codecs.open(options.forum_file, \"r\", \"utf-8\") as json_forum_data:\n json_forums = json.load(json_forum_data)\n\n with codecs.open(options.thread_file, \"r\", \"utf-8\") as json_thread_data:\n json_threads = json.load(json_thread_data)\n\n with codecs.open(options.post_file, \"r\", \"utf-8\") as json_post_data:\n json_posts = json.load(json_post_data)\n\n posts_by_thread_id = {}\n thread_ids = []\n\n for post in json_posts:\n if post[\"thread\"] not in posts_by_thread_id:\n posts_by_thread_id[post[\"thread\"]] = []\n posts_by_thread_id[post[\"thread\"]].append(post)\n if post[\"thread\"] not in thread_ids:\n thread_ids.append(post[\"thread\"])\n\n threads_by_forum_id = {}\n forum_ids = []\n threads_by_id = {}\n\n for thread in json_threads:\n threads_by_id[thread[\"identifier\"]] = thread\n if thread[\"forum\"] not in threads_by_forum_id:\n threads_by_forum_id[thread[\"forum\"]] = []\n 
threads_by_forum_id[thread[\"forum\"]].append(thread)\n if thread[\"forum\"] not in forum_ids:\n forum_ids.append(thread[\"forum\"])\n\n forums_by_id = {}\n for forum in json_forums:\n forums_by_id[forum[\"identifier\"]] = forum\n\n progress = ProgressBar()\n\n for forum in progress(json_forums):\n if forum[\"identifier\"] not in forum_ids:\n continue\n\n forum[\"threads\"] = []\n\n if forum[\"identifier\"] not in threads_by_forum_id:\n continue\n\n for thread in threads_by_forum_id[forum[\"identifier\"]]:\n thread[\"posts\"] = []\n\n if thread[\"identifier\"] not in posts_by_thread_id:\n continue\n\n for post in posts_by_thread_id[thread[\"identifier\"]]:\n thread[\"posts\"].append(post)\n\n forum[\"threads\"].append(thread)\n\n for forum in json_forums:\n # empty threads (due to indesirable first and last post dates) are removed\n to_be_removed = []\n\n for thread in forum[\"threads\"]:\n del thread[\"forum\"]\n\n if len(thread[\"posts\"]) == 0:\n to_be_removed.append(thread)\n\n for post in thread[\"posts\"]:\n if \"thread\" in post:\n del post[\"thread\"]\n\n n_posts += len(thread[\"posts\"])\n\n for thread in to_be_removed:\n forum[\"threads\"].remove(thread)\n\n n_threads += len(forum[\"threads\"])\n\n with io.open(output_file, \"w\", encoding=\"utf-8\") as f:\n f.write(unicode(json.dumps(json_forums, ensure_ascii=False)))\n\n print(\"Assembled map exported to \" + output_file)\n print(\"{0} forums, {1} threads and {2} posts scraped\".format(len(json_forums), n_threads, n_posts))", "def get_askreddit_threads():\n\n print_step(\"Getting AskReddit threads...\")\n\n content = {}\n load_dotenv()\n reddit = praw.Reddit(\n client_id=os.getenv(\"REDDIT_CLIENT_ID\"),\n client_secret=os.getenv(\"REDDIT_CLIENT_SECRET\"),\n user_agent=\"Accessing AskReddit threads\",\n username=os.getenv(\"REDDIT_USERNAME\"),\n password=os.getenv(\"REDDIT_PASSWORD\"),\n )\n askreddit = reddit.subreddit(\"askreddit\")\n threads = askreddit.hot(limit=25)\n submission = list(threads)[random.randrange(0, 25)]\n print_substep(f\"Video will be: {submission.title} :thumbsup:\")\n try:\n\n content[\"thread_url\"] = submission.url\n content[\"thread_title\"] = submission.title\n content[\"comments\"] = []\n\n for top_level_comment in submission.comments:\n content[\"comments\"].append(\n {\n \"comment_body\": top_level_comment.body,\n \"comment_url\": top_level_comment.permalink,\n \"comment_id\": top_level_comment.id,\n }\n )\n\n except AttributeError as e:\n pass\n print_substep(\"Received AskReddit threads Successfully.\", style=\"bold green\")\n return content", "def simple_save_thread(db_ses, req_ses, SimplePosts, board_name, thread_num, dl_dir):\n logging.info(u'Fetching thread: {0!r}'.format(thread_num))\n # Calculate values\n thread_url = u'https://warosu.org/{bn}/thread/{tn}'.format(bn=board_name, tn=thread_num)\n thread_filename = 'warosu.{bn}.{tn}.html'.format(bn=board_name, tn=thread_num)\n thread_filepath = os.path.join(dl_dir, u'{0}'.format(board_name), thread_filename)\n logging.debug(u'thread_url={0!r}'.format(thread_url))\n\n # Look for all posts for this thread in DB\n logging.debug('About to look for existing posts for this thread')\n existing_posts_q = db_ses.query(SimplePosts)\\\n .filter(SimplePosts.thread_num == thread_num,)\n existing_posts = existing_posts_q.all()\n logging.debug(u'existing_posts={0!r}'.format(existing_posts))\n logging.debug(u'len(existing_posts)={0!r}'.format(len(existing_posts)))\n\n # Load thread\n thread_res = common.fetch( requests_session=req_ses, url=thread_url, )\n 
thread_html = thread_res.content\n # Save for debugging/hoarding\n logging.debug(u'thread_filepath={0!r}'.format(thread_filepath))\n common.write_file(file_path=thread_filepath, data=thread_res.content)# Store page to disk\n\n # Find posts\n posts = thread_parsers.split_thread_into_posts(html=thread_html)\n logging.debug(u'len(posts)={0!r}'.format(len(posts)))\n for post_html in posts:# Process each post\n # Get post num and subnum (Num is post ID, subnum is ghost ID thing)\n num_s, subnum_s = thread_parsers.num_subnum(fragment=post_html)\n num = int(num_s)\n subnum = int(subnum_s)\n # Detect if ghost post\n is_ghost = (subnum != 0)# subnum is 0 for regular replies, positive for ghost replies\n if (not is_ghost):# Skip post if not ghost\n logging.debug(u'Skipping regular reply: thread_num={0!r}, num={1!r}, subnum={2!r}'.format(thread_num, num, subnum))\n continue\n logging.debug(u'Found ghost reply: reply: thread_num={0!r}, num={1!r}, subnum={2!r}'.format(thread_num, num, subnum))\n # Check if post is already in DB\n post_is_in_db = is_post_in_results(results=existing_posts, thread_num=thread_num,\n num=num, subnum=subnum)\n if (post_is_in_db):\n logging.debug(u'Post already saved: thread_num={0!r}, num={1!r}, subnum={2!r}'.format(thread_num, num, subnum))\n else:\n logging.debug('About to insert ghost post')\n # Add post to DB\n new_simplepost = SimplePosts(\n num = num,\n subnum = subnum,\n thread_num = thread_num,\n post_html = post_html,\n )\n db_ses.add(new_simplepost)\n logging.info(u'Inserted a ghost post into SimplePosts')\n logging.info(u'Fetched thread: {0!r}'.format(thread_num))\n return", "def parse_thread(self):\n\n req = requests.get(self.thread)\n if req.status_code == 200:\n data = BeautifulSoup(req.content, \"html.parser\")\n post_messages = data.find(id=\"posts\").find_all(\"li\", recursive=False)\n post_messages = list(filter(None, map(lambda x: self._parse_post_message(x), post_messages)))\n\n \n #for post in post_messages[-3:]:\n # print(\"{} - {} - Post {}\\n{}\\n\".format(colored(post['username'], 'green'), post['date'], post[\"postcounter\"], colored(post['message'], 'yellow')))\n self.post_messages = post_messages\n self._write_location()\n else:\n print(\"Something's wrong, check the thread link.\")", "def url_thread(self, message_id, output=''):\n return self._url('/thread/=%s/' % message_id, output)", "async def get_threads(\n self, pr_id: str, filename: Optional[str] = None\n ) -> List[dict]:\n git_url = url_path_join(pr_id, \"/comments\")\n if filename is None:\n results = await self._call_github(git_url.replace(\"pulls\", \"issues\"))\n return [\n {\n \"id\": result[\"id\"],\n \"comments\": [GitHubManager._response_to_comment(result)],\n \"pullRequestId\": pr_id,\n }\n for result in results\n ]\n else:\n results = await self._call_github(git_url)\n\n threads = []\n replies = []\n for result in results:\n if result[\"path\"] == filename:\n if \"in_reply_to_id\" in result:\n replies.append(result)\n else:\n threads.append([result])\n\n has_changed = True\n while len(replies) > 0 and has_changed:\n has_changed = False\n for reply in replies.copy():\n for comments in threads:\n if comments[-1][\"id\"] == reply[\"in_reply_to_id\"]:\n comments.append(reply)\n replies.remove(reply)\n has_changed = True\n\n return [\n {\n \"id\": thread[-1][\"id\"], # Set discussion id as the last comment id\n \"comments\": [GitHubManager._response_to_comment(c) for c in thread],\n \"filename\": filename,\n \"line\": thread[0][\"line\"],\n \"originalLine\": 
thread[0][\"original_line\"]\n if thread[0][\"line\"] is None\n else None,\n \"pullRequestId\": pr_id,\n }\n for thread in threads\n ]", "def __addThreadAndPosts(self):\n self.__task_elements_dict = {\n 'priority':self.task.priority,\n 'level': self.task.level,\n 'last_updated_time':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'pickup_date':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'connector_instance_log_id': self.task.connector_instance_log_id,\n 'connector_instance_id':self.task.connector_instance_id,\n 'workspace_id':self.task.workspace_id,\n 'client_id':self.task.client_id,\n 'client_name':self.task.client_name,\n 'versioned':False,\n 'category':self.task.instance_data.get('category',''),\n 'task_log_id':self.task.id }\n if not self.__addQuestionInfo():\n log.info(self.log_msg('No Questions found'))\n return False\n self.__goToLastPage()\n while self.__iteratePosts():\n try:\n self.currenturi = 'http://forums.webmd.com' + self.soup.findAll('img', alt='Previous')[1].parent['href']\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('No Previous URL found for url \\\n %s'%self.currenturi))\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a comment (defaults to thread root), find the maximum depth of its descendants
def max_comment_depth(self, comment=None, cur_depth=0):
    if comment is None:
        comment = self.thread
    replies = comment.replies if isinstance(comment, praw.models.Comment) else \
        (comment.comments if isinstance(comment, praw.models.Submission) else None)
    if replies:
        return max(self.max_comment_depth(reply, cur_depth=cur_depth+1) for reply in replies)
    else:
        return cur_depth
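The same depth recursion, sketched on plain nested dicts as a stand-in for praw comment objects; the node shape {"replies": [...]} is assumed purely for illustration:

def max_depth(replies, cur_depth=0):
    # replies: list of reply nodes, each shaped like {"replies": [...]}
    if not replies:
        return cur_depth
    return max(max_depth(r["replies"], cur_depth + 1) for r in replies)

thread = [{"replies": [{"replies": []}]}, {"replies": []}]
print(max_depth(thread))  # 2: the deepest chain holds two comments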
[ "def _find_max_depth(root):\r\n \r\n if root.children() == {}:\r\n return 1\r\n else:\r\n return 1 + max([_find_max_depth(child)\r\n for child in root.children().values()])", "def filter_comments_by_max_depth(self, max_depth, comments=None):\n\t\tif comments is None: \n\t\t\treturn\n\t\tfor i, c in reverse_enumerate(comments):\n\t\t\t# If the comment has no children at a sufficient depth, delete it altogether,\n\t\t\t# Else apply the same algorithm to its children\n\t\t\tprint i, \" -> \", self.max_comment_depth(c), \" v \", (max_depth-1)\n\t\t\tif self.max_comment_depth(c) < (max_depth-1):\n\t\t\t\tprint \" ignoring\", i\n\t\t\telif isinstance(c, praw.models.Comment):\n\t\t\t\tself.commentlist.append(c)\n\t\t\t\tprint \" saving and recursing\", i\n\t\t\t\tself.filter_comments_by_max_depth(max_depth=max_depth-1, comments=c.replies)", "def max_depth(expr):\r\n if isinstance(expr, Atom):\r\n return 1\r\n else:\r\n return 1 + max([ max_depth(arg) for arg in expr.args ])", "def depth(self) -> float:", "def maxDepth(self):\n\t\tif not self.root:\n\t\t\treturn 0\n\t\treturn self._maxDepth(self.root)", "def max_depth():\n return ctoast.timing_manager_max_depth()", "def get_descendant_depth(descendant, top_ancestor):\n # initialise return value\n depth = 0\n\n while descendant != top_ancestor:\n # increase depth\n depth += 1\n # move up level within tree\n descendant = descendant.ancestor\n\n # return depth\n return depth", "def _get_max_depth(self, node):\n if not node:\n return 0\n return 1 + max(self._get_max_depth(node.left),\n self._get_max_depth(node.right))", "def depth(self):\n return self._depth * 10", "def compute_depth(self):\n\t\t\n\t\tself.depth = self.url.count(\"/\") - startURL.count(\"/\")", "def depth(self, TreeNode):\n if not TreeNode:\n return 0\n return 1 + max(self.depth(TreeNode.leftNode),self.depth(TreeNode.rightNode))", "def compute_depth(self,url):\n\t\t\n\t\treturn url.count(\"/\")", "def _depth_node(self, root, depth=0):\n if root is None:\n return depth\n return max(self._depth_node(root.leftChild, depth + 1),\n self._depth_node(root.rightChild, depth + 1))", "def _depth(self, l):\n if isinstance(l, list) and len(l) > 0:\n return 1 + max(self._depth(item) for item in l)\n else:\n return 0", "def getMaxDepth(self):\n return self.getOrDefault(self.maxDepth)", "def get_depth(self):\n depth = 0\n cur = self\n if cur.type == \"spawn\":\n depth += 1\n while cur.parent is not None:\n cur = cur.parent\n if cur.type == \"spawn\":\n depth += 1\n return depth", "def get_depths(self):\n depths = torch.zeros(self.max_node + 1, 1, dtype=torch.int32)\n depth_determiner = torch.ones(self.max_node + 1, 1, dtype=torch.int32)\n adj_matrix = self.remove_self_loops().get_adj_matrix()\n prev_parents = self.max_node + 1\n num_parents = self.max_node + 1\n while num_parents:\n depth_determiner = (adj_matrix.mm(depth_determiner) > 0).int()\n num_parents = sum(depth_determiner)\n if num_parents == prev_parents:\n return None\n prev_parents = num_parents\n depths += depth_determiner\n return depths.flatten()", "def max_path_depth(self) -> ConfigNodePropertyInteger:\n return self._max_path_depth", "def count_levels_nore(self, root):\n d = deque()\n d.append((1,root))\n maxlevels = 1\n while len(d):\n parent = d.pop()\n for child in parent[1].children:\n d.appendleft((parent[0]+1, child))\n maxlevels = max(maxlevels, parent[0])\n return maxlevels" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete all comments which don't have any descendants at depths >= max_depth
def filter_comments_by_max_depth(self, max_depth, comments=None):
    if comments is None:
        return
    for i, c in reverse_enumerate(comments):
        # If the comment has no children at a sufficient depth, delete it altogether,
        # Else apply the same algorithm to its children
        print i, " -> ", self.max_comment_depth(c), " v ", (max_depth-1)
        if self.max_comment_depth(c) < (max_depth-1):
            print " ignoring", i
        elif isinstance(c, praw.models.Comment):
            self.commentlist.append(c)
            print " saving and recursing", i
            self.filter_comments_by_max_depth(max_depth=max_depth-1, comments=c.replies)
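A self-contained sketch of the same pruning rule on the plain dict shape used above: a reply is kept only while its own subtree still reaches the required depth, and the requirement drops by one per level.

def subtree_depth(replies, d=0):
    return max((subtree_depth(r["replies"], d + 1) for r in replies), default=d)

def keep_main_chain(nodes, depth_needed, kept=None):
    kept = [] if kept is None else kept
    for n in nodes:
        # Mirror of the method above: ignore shallow branches, keep and recurse into deep ones.
        if subtree_depth(n["replies"]) >= depth_needed - 1:
            kept.append(n)
            keep_main_chain(n["replies"], depth_needed - 1, kept)
    return kept

tree = [{"replies": [{"replies": [{"replies": []}]}]}, {"replies": []}]
main = keep_main_chain(tree, depth_needed=3)  # collects the 3 comments of the deep branch; the shallow one is dropped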
[ "def delete_children(self, comments):\n parents = []\n for comment in comments.values():\n if comment['parent']:\n parents.append(comment['parent'])\n\n for comment in comments.values():\n if comment['id'] not in parents:\n # Parent comments that have been removed are still returned\n # when children exist and are authored by _nobody_. Such\n # should not be deleted remotely, but only marked internally.\n if comment['who'] != '_nobody_':\n self.delete(comment['id'])\n del comments[comment['id']]\n\n return comments", "def remove_invisible_nodes(rend_tree):\n Q = deque(rend_tree.roots) # initialize the queue with the roots (usually only one root)\n cnt = 0\n while len(Q) > 0:\n #print \"QUEUE: \", \",\".join([_remove_namespace(el.info) for el in Q])\n n = Q.popleft()\n\n if _remove_namespace(n.info) in _IGNORE_TAGS or n.info == \"comment\":\n #print \" !!!!! DELETE !!!!! \", _remove_namespace(n.info)\n \"\"\"\n - we visit n's children and update parent\n \"\"\"\n for c in n.children:\n c.parent = n.parent\n\n \"\"\"\n - then we visit n.parent and:\n 1) delete n from children, and \n 2) insert at the position of n \n all the children of n\n \"\"\"\n if isinstance(n.parent, RenderTree): # n is a root\n child_pos = n.parent.roots.index(n)\n n.parent.roots = n.parent.roots[0:child_pos] + n.children + n.parent.roots[child_pos+1:]\n\n else: # n is an inner node\n child_pos = n.parent.children.index(n)\n n.parent.children = n.parent.children[0:child_pos] + n.children + n.parent.children[child_pos+1:]\n\n cnt += 1\n\n Q.extend(n.children)\n\n print \"Deleted {} nodes\".format(cnt)\n return rend_tree", "def remove_bad_comments(threshold=0, num_to_check=15):\n user = reddit.redditor(USERNAME)\n for comment in user.comments.new(limit=num_to_check):\n if comment.score < threshold:\n comment.delete()", "def test_delete_reassignes_depth_large_tree(bst):\n bst.insert(8)\n bst.insert(5)\n bst.insert(3)\n bst.insert(2)\n bst.insert(1)\n assert bst.root.left.left.left.left.depth == 1\n assert bst.root.left.left.left.depth == 2\n assert bst.root.left.left.depth == 3\n assert bst.root.left.depth == 4\n assert bst.root.depth == 5\n bst.delete(2)\n assert bst.root.left.left.left.depth == 1\n assert bst.root.left.left.depth == 2\n assert bst.root.left.depth == 3\n assert bst.root.depth == 4", "def pruneTree(self):\n self.stats.stopwatch('pruneTree').begin()\n\tdef pruneIt(n):\n newchildren = []\n delchildren = []\n for c in n.children:\n if self.prune(c) or not self.cspace.feasible(c.x):\n delchildren.append(c)\n else:\n newchildren.append(c)\n\t for c in delchildren:\n c.parent = None\n c.destroy()\n n.children = newchildren\n return True\n\tnewNodes = []\n\tdef addNodes(n):\n\t newNodes.append(n)\n\tassert not self.prune(self.root),\"Root node is asked to be pruned... 
can't handle this case\"\n\tself.root.traverse(pruneIt)\n\tself.root.traverse(addNodes)\n\tself.nodes = newNodes\n self.nearestNeighbors.set([n.x for n in self.nodes],self.nodes)\n self.stats.stopwatch('pruneTree').end()", "def prune_tree(tree, paths):\n raise NotImplemented()", "def deep_trees_tree_del(base):\n j = os.path.join\n F = j(base, 'F', 'alpha')\n D = j(base, 'D', 'D1')\n DF = j(base, 'DF', 'D1')\n DD = j(base, 'DD', 'D1')\n DDF = j(base, 'DDF', 'D1')\n DDD = j(base, 'DDD', 'D1')\n main.run_svn(None, 'rm', F, D, DF, DD, DDF, DDD)", "def delete_orphan_course_run_nodes(apps, schema_editor):\n TreeNode = apps.get_model(\"cms\", \"TreeNode\")\n\n for node in (\n TreeNode.objects.filter(\n cms_pages__isnull=True, parent__cms_pages__course__isnull=False\n )\n .distinct()\n .iterator()\n ):\n # When deleting a node, we must also update its parent to decrement\n # its number of children\n parentpath = node.path[0 : (node.depth - 1) * 4]\n parent = TreeNode.objects.get(path=parentpath)\n node.delete()\n parent.numchild -= 1\n parent.save()", "def __delete_category_tree(self, tree: Optional[CategoryTree]) -> NoReturn:\n if tree is None:\n return\n self.session.execute(\"PRAGMA foreign_keys=ON;\")\n (self.session.query(CategoryModel)\n .filter(and_(\n CategoryModel.user_id == self.user_id,\n CategoryModel.id == tree.id))\n .delete())\n self.session.commit()\n if tree.children is not None:\n for child in tree.children:\n self.__delete_category_tree(child)", "async def delete(self, db: AsyncSession, *, id: int, user: User) -> Comment:\n # Fetch comments from all levels of the tree\n l1_comments = (\n select(Comment.id)\n .where(\n Comment.deleted_at == EPOCH,\n Comment.id == id,\n )\n .alias(\"l1\")\n )\n\n l2_comments = (\n select(Comment.id)\n .where(Comment.deleted_at == EPOCH, Comment.parent_id == l1_comments.c.id)\n .lateral(\"l2\")\n )\n\n l3_comments = (\n select(Comment.id)\n .where(Comment.deleted_at == EPOCH, Comment.parent_id == l2_comments.c.id)\n .lateral(\"l3\")\n )\n\n # Join comments of all levels in the tree.\n # Convert all level comment id columns to rows & get unique of ids\n query = select(\n distinct(\n func.unnest(\n array((l1_comments.c.id, l2_comments.c.id, l3_comments.c.id))\n )\n ).label(\"id\")\n ).select_from(\n l1_comments.outerjoin(l2_comments, true()).outerjoin(l3_comments, true())\n )\n\n comment_ids = await self.get_q(db, query=query)\n\n # Update all comments from above ids as deleted\n delete_stmt = (\n update(Comment)\n .where(\n Comment.id.in_(comment_ids),\n )\n .values(\n {Comment.deleted_at: datetime.utcnow(), Comment.updated_by_id: user.id}\n )\n )\n await db.execute(delete_stmt)\n await db.commit()", "def new_child_comments():\n c.execute('''SELECT * FROM comments WHERE is_root=0 AND posted=0''')\n for comment in c.fetchall():\n yield comment", "def prune(self):\n for leaf in self.leaves:\n if leaf.lower > self.upper_glob:\n self.leaves.remove(leaf)", "def recursiveRemove(path):", "def flatten_comments(root_comments):\n all_comments = []\n nodes = root_comments[:]\n while nodes:\n node = nodes.pop()\n data = node['data']\n if 'body' not in data:\n #\n # weird child node\n #\n continue\n comment = Comment(data['body'], int(data['ups']), int(data['downs']))\n all_comments.append(comment)\n if data['replies']:\n for reply in data['replies']['data']['children']:\n nodes.append(reply)\n return all_comments", "def delete_recursive(system, path):\n from designsafe.apps.data.models.elasticsearch import IndexedFile\n hits = walk_children(system, path, 
include_parent=True, recurse=True)\n idx = IndexedFile.Index.name\n client = get_connection('default')\n\n # Group children in batches of 100 for bulk deletion.\n for group in grouper(hits, 100):\n filtered_group = filter(lambda hit: hit is not None, group)\n ops = map(lambda hit: {'_index': idx,\n '_id': hit.meta.id,\n '_op_type': 'delete'},\n filtered_group)\n bulk(client, ops)", "def _delete_max(self):\n assert not self.parent, 'self should be root.'\n\n if not self.right:\n # self is max, so delete self.\n self_left = self.left\n self._cut('left')\n return self_left\n\n grand = self\n parent = grand.right\n child = parent.right\n while child:\n grand = grand.right\n parent = parent.right\n child = child.right\n\n # parent is max, so delete parent.\n #\n # grand\n # \\\n # --- cut\n # \\\n # parent\n # / \\\n # cut --- \\\n # / \\\n # parent_left child(None)\n #\n parent_left = parent.left\n grand._cut('right')\n parent._cut('left')\n grand._connect('right', parent_left)\n return self", "def cleanup(parentnodes, name):\n todelete = []\n for item in parentnodes:\n todelete = todelete + item.Childs()\n\n for item in todelete:\n if item.Name().startswith(name):\n item.Delete()", "def prune(self):\n self.branches = [b for b in self.branches if not self.contr(b)]", "def test_seed_child_comments(self):\n out1 = StringIO()\n call_command('seed_comments', number_roots=5, stdout=out1)\n out = StringIO()\n call_command('seed_comments', number_children=10, stdout=out)\n expected_out = \"Creating 10 new child comments\"\n self.assertIn(expected_out, out.getvalue())\n self.assertEqual(Comment.objects.count(), 15)\n for comment in Comment.objects.all():\n self.assertIsNotNone(comment.post)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flattens a chain of comments, but stops if it gets to an ambiguous point where a comment has more than one child (or no children)
def flatten(self, comment=None):
    print 'flattening'
    if comment is None:
        print 'comment is none'
        comment = self.commentlist[0]
    while isinstance(comment, praw.models.Comment):
        print comment.body_html
        yield comment
        comment = comment.replies[0]
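A sketch of the same walk on the plain dict shape used above: follow the first reply at each level until the chain runs out (the praw type check is replaced here by an emptiness check):

def follow_first_reply(node):
    while node is not None:
        yield node
        replies = node.get("replies", [])
        node = replies[0] if replies else None

chain = {"body": "a", "replies": [{"body": "b", "replies": []}]}
print([n["body"] for n in follow_first_reply(chain)])  # ['a', 'b']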
[ "def flatten_comments(root_comments):\n all_comments = []\n nodes = root_comments[:]\n while nodes:\n node = nodes.pop()\n data = node['data']\n if 'body' not in data:\n #\n # weird child node\n #\n continue\n comment = Comment(data['body'], int(data['ups']), int(data['downs']))\n all_comments.append(comment)\n if data['replies']:\n for reply in data['replies']['data']['children']:\n nodes.append(reply)\n return all_comments", "def flatten(container, lvl=1, accessor=lambda x: x):\n for i in container:\n if not isinstance(i, (list,tuple)):\n yield (i, lvl) # yield current comment\n if isinstance(accessor(i), (list,tuple)) and accessor(i):\n for j in flatten(accessor(i), lvl+1, accessor):\n yield j # yield flattened out children", "def filter_comments_by_max_depth(self, max_depth, comments=None):\n\t\tif comments is None: \n\t\t\treturn\n\t\tfor i, c in reverse_enumerate(comments):\n\t\t\t# If the comment has no children at a sufficient depth, delete it altogether,\n\t\t\t# Else apply the same algorithm to its children\n\t\t\tprint i, \" -> \", self.max_comment_depth(c), \" v \", (max_depth-1)\n\t\t\tif self.max_comment_depth(c) < (max_depth-1):\n\t\t\t\tprint \" ignoring\", i\n\t\t\telif isinstance(c, praw.models.Comment):\n\t\t\t\tself.commentlist.append(c)\n\t\t\t\tprint \" saving and recursing\", i\n\t\t\t\tself.filter_comments_by_max_depth(max_depth=max_depth-1, comments=c.replies)", "def get_flattened_comments(self) -> List[Comment]:\n return self.comments.list()", "async def format_nested_comments(\n db: AsyncSession,\n *,\n comments: List[Comment],\n permalink: str,\n user: Optional[User] = None,\n ) -> CommentListOut:\n\n users, comment_actions = await crud.comment.fetch_comments_data(\n db, comments=comments, user=user\n )\n\n l1_index = {}\n l1_comments = []\n for comment in comments:\n if permalink is None:\n content_link = ContentInBase().generate_permalink(\n comment.content.permalink, comment.content.id\n )\n else:\n content_link = permalink\n\n if comment.l1_id:\n (l1_index, l1_comments,) = await crud.comment.format_single_comment(\n comment,\n level=1,\n index=l1_index,\n permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l1_comments,\n schema=CommentL1Out,\n )\n l1_index_obj = l1_index[comment.l1_id]\n\n if comment.l2_id:\n l2_index = l1_index_obj[\"child_index\"]\n l2_comments = l1_comments[l1_index_obj[\"list_id\"]].comments\n\n (l2_index, l2_comments,) = await crud.comment.format_single_comment(\n comment,\n level=2,\n index=l2_index,\n permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l2_comments,\n schema=CommentL2Out,\n )\n l2_index_obj = l2_index[comment.l2_id]\n\n if comment.l3_id:\n l3_index = l2_index_obj[\"child_index\"]\n l3_comments = l2_comments[l2_index_obj[\"list_id\"]].comments\n\n await crud.comment.format_single_comment(\n comment,\n level=3,\n index=l3_index,\n permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l3_comments,\n schema=CommentL3Out,\n )\n\n l1_total = comments[0].l1_total if comments else 0\n master_comments_out = CommentListOut(\n comments=l1_comments, comments_total=l1_total\n )\n return master_comments_out", "def get_comments(comments):\n from utils import quick_encrypt\n if comments is None:\n return []\n elif isinstance(comments, praw.models.reddit.more.MoreComments):\n return []\n elif isinstance(comments, praw.models.reddit.comment.Comment):\n author = None\n if comments.author:\n author = 
quick_encrypt(comments.author.name)\n return [(comments.body, author)]\n elif isinstance(comments, praw.models.comment_forest.CommentForest):\n combined = []\n for comment in (comments.list()):\n combined = combined + get_comments(comment)\n return combined\n elif isinstance(comments, list):\n return []\n else:\n print(type(comments))\n print(comments)", "def new_child_comments():\n c.execute('''SELECT * FROM comments WHERE is_root=0 AND posted=0''')\n for comment in c.fetchall():\n yield comment", "def flatten(iterator, dont_flatten=()):\n for element in iterator:\n if (isinstance(element, Iterable) and\n not isinstance(element, tuple(dont_flatten)+(str, bytes))):\n yield from flatten(element, dont_flatten=dont_flatten)\n else:\n yield element", "def flatten(self):\n flattened_text = [self.text or '']\n for child in list(self):\n flattened_text.append(child.flatten())\n flattened_text.append(child.tail or '')\n self.remove(child)\n return ''.join(flattened_text)", "def flatten(iter):\n out = []\n for x in iter:\n if not x:\n continue\n if isinstance(x, (list, tuple, set)):\n out += flatten(x)\n else:\n out.append(x)\n return out", "def _recurse(self, parse, arc):\n if arc.rule.is_terminal:\n return '[.{} {}]'.format(arc.rule.parent, arc.rule.first)\n parse = '[.{} '.format(arc.rule.parent)\n for child in arc.history:\n parse += self._recurse(parse, child)\n parse += ']'\n return parse", "def flatten(arr):\n for val in arr:\n if isinstance(val, list):\n for sub in flatten(val):\n if sub is not None:\n yield sub\n else:\n if val is not None:\n yield val", "def parenthesize(T, p):\n print p.element(), # use of end avoids trailing newline\n if not T.is_leaf(p):\n first_time = True\n for c in T.children(p):\n sep ='(' if first_time else ',' # determine proper separator\n print sep,\n first_time = False # any future passes will not be the first\n parenthesize(T, c) # recur on child\n print ')', # include closing parenthesis", "def get_top_level_comments(self) -> Generator[Tuple[str, str], None, None]:\n for comment in self.comments:\n yield (comment.id, comment.body)", "def _reflowComments(self, comments, max_length):\n flowed_comments = []\n\n for comment in comments:\n lines = comment.split(\"\\n\")\n for line in lines:\n if len(line) > max_length:\n line = textwrap.wrap(line, max_length)\n flowed_comments.extend(line)\n else:\n flowed_comments.append(line)\n\n return flowed_comments", "def new_root_comments():\n c.execute('''SELECT * FROM comments WHERE is_root=1 AND posted=0''')\n for comment in c.fetchall():\n yield comment", "def _unflatten_tree(self):\r\n # the first DIE in the list is the root node\r\n root = self._dielist[0]\r\n parentstack = [root]\r\n\r\n for die in self._dielist[1:]:\r\n if not die.is_null():\r\n cur_parent = parentstack[-1]\r\n # This DIE is a child of the current parent\r\n cur_parent.add_child(die)\r\n die.set_parent(cur_parent)\r\n if die.has_children:\r\n parentstack.append(die)\r\n else:\r\n # parentstack should not be really empty here. 
However, some\r\n # compilers generate DWARF that has extra NULLs in the end and\r\n # we don't want pyelftools to fail parsing them just because of\r\n # this.\r\n if len(parentstack) > 0:\r\n # end of children for the current parent\r\n parentstack.pop()", "def getFlattenDeps (self, skipOptional = True):\n return self.root.flatten (skipOptional)", "def _reconstruct_threads(asset):\n id = asset['_id']\n parents = defaultdict(list)\n for c in asset['comments']:\n p_id = c['parent_id']\n if isinstance(p_id, float) and math.isnan(p_id):\n p_id = id\n parents[p_id].append(c)\n\n threads = []\n for top_level_parent in sorted(parents[id], key=lambda p: p['date_created']):\n threads.append(_reconstruct_thread(top_level_parent, parents))\n asset['threads'] = threads\n return asset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Serialize a flat sequence of comments into an array of dicts that can easily be serialized to JSON.
def comments_to_dicts(self, comments):
    list_of_dicts = [{ "author": c.author.name, "body_html":c.body_html,
        "created_utc":c.created_utc, "permalink":c.permalink(True) } for c in comments]
    return list_of_dicts
[ "def getCommentDictList(comment_list):\n comment_dict_list = []\n for comment in comment_list:\n comment_dict = {}\n author_dict = getAuthorDict(comment.user)\n comment_dict[\"author\"] = author_dict\n comment_dict[\"comment\"] = comment.content\n # TODO python datetime is not JSON serializable\n formatter = \"%a %b %d %H:%M:%S mst %Y\"\n timestring = comment.published_date.strftime(formatter)\n comment_dict[\"pubDate\"] = timestring\n comment_dict[\"guid\"] = comment.guid\n comment_dict_list.append(comment_dict)\n return comment_dict_list", "def get_json(self):\n\t\treturn json.dumps(self.comment_data)", "def format_comments(self, contents):\n comment_template, reply_template = self.format_templates()\n comments = []\n for i, comment in enumerate(contents):\n comment['num'] = i + 1\n comments.append(comment_template.format(**comment))\n for j, reply in enumerate(comment['replies']):\n reply['num'] = j + 1\n if 'content' not in reply.keys():\n reply['content'] = ''\n comments.append(reply_template.format(**reply))\n comments.append('\\n\\n')\n\n return comments", "def parse_comments(self, comments):\n # Keep fields as uppercase in case we want to compare back with\n # UniProt.Record.comments\n comments = [str.split(comment, \":\", 1) for comment in comments]\n comments = [(str.strip(comment[0]), str.strip(comment[1])) for\n comment in comments]\n return dict(comments)", "def get_flattened_comments(self) -> List[Comment]:\n return self.comments.list()", "def save_all(comments, filename):\n with open(filename, \"w\") as f:\n json.dump(comments, f)\n num = len(comments)\n size = os.path.getsize(filename)\n print(\"\\nDone. Written %d comments to file '%s' (%d bytes).\" %\n (num, filename, size))", "def get_comments(comments):\n from utils import quick_encrypt\n if comments is None:\n return []\n elif isinstance(comments, praw.models.reddit.more.MoreComments):\n return []\n elif isinstance(comments, praw.models.reddit.comment.Comment):\n author = None\n if comments.author:\n author = quick_encrypt(comments.author.name)\n return [(comments.body, author)]\n elif isinstance(comments, praw.models.comment_forest.CommentForest):\n combined = []\n for comment in (comments.list()):\n combined = combined + get_comments(comment)\n return combined\n elif isinstance(comments, list):\n return []\n else:\n print(type(comments))\n print(comments)", "def serialize_diff_comments_js_model_data(self, diff_comments):\n diff_comments_data = []\n\n for comment in diff_comments:\n key = '%s' % comment.filediff_id\n\n if comment.interfilediff_id:\n key = '%s-%s' % (key, comment.interfilediff_id)\n\n diff_comments_data.append((str(comment.pk), key))\n\n return diff_comments_data", "def get_comments(self):\n comments = self.data().get('comments', {}).get('data', [])\n migration_key = FacebookPost.migration.get_value_for_datastore(self)\n return (FacebookComment(key_name_parts=(cmt['id'], migration_key.name()),\n json_data=json.dumps(cmt))\n for cmt in comments)", "def comment_to_dictionary(comment):\n p = dict()\n p['comment'] = comment.text\n p['date'] = comment.time\n p['time'] = time.mktime(comment.time.timetuple())\n p['type'] = comment.type\n return p", "def comments(self):\n comments = self.get_edges() \\\n .get(API_EDGE_TYPE.HAS_COMMENT_FROM, {}) \\\n .values()\n comments.sort(key=lambda x: x.created_ts)\n return comments", "def comments(self):\r\n from .._impl.comments import Comment\r\n cs = []\r\n start = 1\r\n num = 100\r\n nextStart = 0\r\n url = \"%s/sharing/rest/content/items/%s/comments\" % 
(self._portal.url, self.id)\r\n while nextStart != -1:\r\n params = {\r\n \"f\" : \"json\",\r\n \"start\" : start,\r\n \"num\" : num\r\n }\r\n res = self._portal.con.post(url, params)\r\n for c in res['comments']:\r\n cs.append(Comment(url=\"%s/%s\" % (url, c['id']),\r\n item=self, initialize=True))\r\n start += num\r\n nextStart = res['nextStart']\r\n return cs", "def _make_xml_comments(self, mt_comment):\n comments = []\n clist = mt_comment.split(\"run_ids:\")\n for item in clist:\n if \":\" in item:\n k, v = item.split(\":\")\n comments.append(inventory.Comment(v, subject=k))\n elif \"[\" in item and \"]\" in item:\n for run in item.replace(\"[\", \"\").replace(\"]\", \"\").split(\",\"):\n run = run.strip()\n if run:\n comments.append(\n inventory.Comment(run.strip(), subject=\"mt.run.id\")\n )\n return comments", "def __get_comments(self, root):\n comments_root = self.__expand_shadow_element_by_tag_name(root, 'mr-comment-list')\n\n list_of_comments = comments_root.find_elements_by_tag_name('mr-comment')\n print ('[*] %d comments' %len(list_of_comments))\n comments = []\n for c in list_of_comments:\n comment_root = self.__expand_shadow_element(c)\n comment_header = comment_root.find_element_by_css_selector('div>div').text.replace('\\n', ' ')\n \n m = re.match(self.comment_pattern, comment_header)\n blank_comment = { 'comment_id':'', 'comment_datetime':'', \n 'comment_author':'', 'comment_message': ' '} \n if m:\n comment_id = m.group(1).strip('\\n\\r ')\n if not 'Deleted' in comment_header:\n message_root = self.__expand_shadow_element_by_css_selector(comment_root, '.comment-body>mr-comment-content')\n lines = message_root.find_elements_by_css_selector('.line')\n\n comments.append({\n 'comment_id': comment_id,\n 'comment_datetime': m.group(4).strip('\\n\\r '),\n 'comment_author' : m.group(3).strip('\\n\\r '),\n 'comment_message': ' '.join([l.text.strip('\\n\\r ') for l in lines]) \n })\n else:\n blank_comment['comment_id'] = comment_id\n comments.append(blank_comment) \n else:\n comments.append(blank_comment) \n return comments", "def parse_comments(media_json):\n comments_attributes = media_json['edge_media_to_parent_comment']\n\n # iterate over comments\n comments = []\n for edge in comments_attributes['edges']:\n comments.append(edge['node']['text'])\n\n return comments", "def get_comments(rc_file, submissions):\n comments = {}\n with bz2.open(rc_file, 'rt', encoding=\"utf-8\") as f:\n for line in f:\n try:\n comment = json.loads(line)\n sid = get_linked_submission_id(comment)\n if sid in submissions.keys():\n comments[get_comment_id(comment)] = comment\n except Exception:\n traceback.print_exc()\n pass\n return comments", "def save_comments(comments: dict):\n\n # store comments in JSON file\n CREATED_FILES.append(COMMENTS_FILE_JSON)\n with open(COMMENTS_FILE_JSON, \"w\") as f:\n f.write(json.dumps(comments, indent=4))\n\n # create a textual representation of the discovered comments\n CREATED_FILES.append(COMMENTS_FILE_TXT)\n with open(COMMENTS_FILE_TXT, \"w\") as f:\n for ip, ports_node in comments.items():\n for portid, hosts_node in ports_node.items():\n # try to guess protocol prefix for the current network endpoint\n protocol_prefix = \"\"\n if str(portid) == \"80\":\n protocol_prefix = \"http://\"\n elif str(portid) == \"443\":\n protocol_prefix = \"https://\"\n\n # iterate over the host names and all its discovered comments\n for host, cur_comments_node in hosts_node.items():\n header = \"**** %s:%s - %s ****\" % (ip, str(portid), host)\n full_header = \"*\" * len(header) + 
\"\\n\" + header + \"\\n\" + \"*\" * len(header) + \"\\n\"\n f.write(full_header)\n\n for path, cur_comments in cur_comments_node.items():\n f.write(\"-\" * 80 + \"\\n\")\n f.write(\" [+] %s\\n\" % (protocol_prefix + host + path))\n f.write(\"-\" * 80 + \"\\n\")\n\n # print all of the comments\n for comment in cur_comments:\n justification = 14\n f.write((\" Line %d: \" % int(comment[\"line\"])).ljust(justification))\n lines = comment[\"comment\"].splitlines()\n if lines:\n f.write(lines[0] + \"\\n\")\n if len(lines) > 1:\n for line in lines[1:]:\n f.write(\" \" + \" \" * justification + line + \"\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")", "def items(self):\n\n return [(c.id, c) for c in self.comments]", "def bind2issues(issues, comments):\n issues_with_comments = copy.deepcopy(issues)\n c2i = {}\n for i, comm in enumerate(comments):\n key = f'{comm[\"projectKey\"]}-{comm[\"issueNumber\"]}'\n if key not in c2i.keys():\n c2i[key] = [i]\n else:\n c2i[key].append(i)\n\n for iss in issues_with_comments:\n key = f'{iss[\"projectKey\"]}-{iss[\"issueNumber\"]}'\n if key in c2i.keys():\n issue_comments = []\n for i in c2i[key]:\n issue_comments.append({\n k: v for k, v in comments[i].items() if k in ['commentAuthor', 'commentCreated', 'commentText']\n })\n iss['issueComments'] = issue_comments\n\n return issues_with_comments" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the flat comment array formatted as a JSON string to easily store in a file, etc.
def get_json(self):
    return json.dumps(self.comment_data)
[ "def get_flattened_comments(self) -> List[Comment]:\n return self.comments.list()", "def format_comments(self, contents):\n comment_template, reply_template = self.format_templates()\n comments = []\n for i, comment in enumerate(contents):\n comment['num'] = i + 1\n comments.append(comment_template.format(**comment))\n for j, reply in enumerate(comment['replies']):\n reply['num'] = j + 1\n if 'content' not in reply.keys():\n reply['content'] = ''\n comments.append(reply_template.format(**reply))\n comments.append('\\n\\n')\n\n return comments", "def comments_to_dicts(self, comments):\n\t\tlist_of_dicts = [{ \"author\": c.author.name, \"body_html\":c.body_html, \n\t\t\"created_utc\":c.created_utc, \"permalink\":c.permalink(True) } for c in comments]\n\t\treturn list_of_dicts", "def serialize_diff_comments_js_model_data(self, diff_comments):\n diff_comments_data = []\n\n for comment in diff_comments:\n key = '%s' % comment.filediff_id\n\n if comment.interfilediff_id:\n key = '%s-%s' % (key, comment.interfilediff_id)\n\n diff_comments_data.append((str(comment.pk), key))\n\n return diff_comments_data", "def getCommentDictList(comment_list):\n comment_dict_list = []\n for comment in comment_list:\n comment_dict = {}\n author_dict = getAuthorDict(comment.user)\n comment_dict[\"author\"] = author_dict\n comment_dict[\"comment\"] = comment.content\n # TODO python datetime is not JSON serializable\n formatter = \"%a %b %d %H:%M:%S mst %Y\"\n timestring = comment.published_date.strftime(formatter)\n comment_dict[\"pubDate\"] = timestring\n comment_dict[\"guid\"] = comment.guid\n comment_dict_list.append(comment_dict)\n return comment_dict_list", "def getAllComment(self):\n result = CommentDAO().getAllComment()\n mapped_result = self.buildMethod(result)\n return jsonify(Comment=mapped_result)", "def _get_comment_text():\n comment_samples = [\n \"Malesu mauris nas lum rfusce vehicula bibend. Morbi.\",\n \"Nuncsed quamal felis donec rutrum class ipsumnam teger. Sedin metusd metusdo quamnunc utcras facilis nequen.\",\n \"Adipisci ent neque eger vehicula dis. Miquis auctorpr quamphas purusp phasel duifusce parturi. Ris liberoa ligula lacini risus nean. Arcualiq cubilia aenean nuncnunc ulum fringi uisque abitur rerit setiam. Nean miproin aliquet risusvi tempusp aliquete. Integer nequenu bulum ibulum laoree accumsan ellus mus odio uis. Amet curae ivamus congue aliquama liberofu que.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In justov volutpat mus habitas dapibusc nequenu volutp justo. Quam blandi tur maurisd egesta erossed morbi turpis risus tate. Lacusp facilis class vehicula varius iaculis setiam montes pharetra. Usce ecenas quispr naeos nec nibhphas lacinia roin. Abitur maurisma metusqui justop uscras llam enas. Magnaqu faucibus sduis arcualiq imperd teger egetlor teger.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Conseq tristiq enas duis sociosqu eduis enimsed tudin vel. Lus semnunc risusm nulla parturi atein at placerat. Tiam laut nibhnul turpisn vitaenul eleifen commodo euismo quat posuered. Egestas nullain justop maurisin purusp donec nas liberofu aptent. Nec aliquam tiam puruscra turpisp luctus proin. Lectusin turpisn usce orcivest nullam eget arcuduis tdonec min. Esent cursus vulput aenean bulum lacini congued pretiu. Portamor bulum tate isse llam cidunt estmae.\\n\\nSque leocras fusce nullap fusce convall laoreet nibhnull estsusp. Roin aliquet esent ctetur blandit etiam nequesed viverr. Nislqu sse orciduis lacusp in tasse gravida lla ullam. 
Itnunc id mauris rerit entum disse lacinia. Oin luctus velit musetiam onec potenti ipsump volutp. Tortor musetiam bibendum onec esent libero esque sim. Enas ras eclass placerat sedin risusut vulput enimdon montes. Rhoncus dolorma estsusp facilis etsed llaut esque cursus. Nisl ullamcor tincid llus nulla iaculis.\",\n ]\n return random.choice(comment_samples)", "def _make_xml_comments(self, mt_comment):\n comments = []\n clist = mt_comment.split(\"run_ids:\")\n for item in clist:\n if \":\" in item:\n k, v = item.split(\":\")\n comments.append(inventory.Comment(v, subject=k))\n elif \"[\" in item and \"]\" in item:\n for run in item.replace(\"[\", \"\").replace(\"]\", \"\").split(\",\"):\n run = run.strip()\n if run:\n comments.append(\n inventory.Comment(run.strip(), subject=\"mt.run.id\")\n )\n return comments", "def as_json_text(self) -> str:\n return json.dumps([file.__dict__ for file in self.collection], indent=4)", "def save_comments(comments: dict):\n\n # store comments in JSON file\n CREATED_FILES.append(COMMENTS_FILE_JSON)\n with open(COMMENTS_FILE_JSON, \"w\") as f:\n f.write(json.dumps(comments, indent=4))\n\n # create a textual representation of the discovered comments\n CREATED_FILES.append(COMMENTS_FILE_TXT)\n with open(COMMENTS_FILE_TXT, \"w\") as f:\n for ip, ports_node in comments.items():\n for portid, hosts_node in ports_node.items():\n # try to guess protocol prefix for the current network endpoint\n protocol_prefix = \"\"\n if str(portid) == \"80\":\n protocol_prefix = \"http://\"\n elif str(portid) == \"443\":\n protocol_prefix = \"https://\"\n\n # iterate over the host names and all its discovered comments\n for host, cur_comments_node in hosts_node.items():\n header = \"**** %s:%s - %s ****\" % (ip, str(portid), host)\n full_header = \"*\" * len(header) + \"\\n\" + header + \"\\n\" + \"*\" * len(header) + \"\\n\"\n f.write(full_header)\n\n for path, cur_comments in cur_comments_node.items():\n f.write(\"-\" * 80 + \"\\n\")\n f.write(\" [+] %s\\n\" % (protocol_prefix + host + path))\n f.write(\"-\" * 80 + \"\\n\")\n\n # print all of the comments\n for comment in cur_comments:\n justification = 14\n f.write((\" Line %d: \" % int(comment[\"line\"])).ljust(justification))\n lines = comment[\"comment\"].splitlines()\n if lines:\n f.write(lines[0] + \"\\n\")\n if len(lines) > 1:\n for line in lines[1:]:\n f.write(\" \" + \" \" * justification + line + \"\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")", "def make_json(self, raw_string):\n return json.dumps([x.strip() for x in raw_string.split(',')])", "def get_comments(self):\n comments = self.data().get('comments', {}).get('data', [])\n migration_key = FacebookPost.migration.get_value_for_datastore(self)\n return (FacebookComment(key_name_parts=(cmt['id'], migration_key.name()),\n json_data=json.dumps(cmt))\n for cmt in comments)", "def get_comments(comments):\n from utils import quick_encrypt\n if comments is None:\n return []\n elif isinstance(comments, praw.models.reddit.more.MoreComments):\n return []\n elif isinstance(comments, praw.models.reddit.comment.Comment):\n author = None\n if comments.author:\n author = quick_encrypt(comments.author.name)\n return [(comments.body, author)]\n elif isinstance(comments, praw.models.comment_forest.CommentForest):\n combined = []\n for comment in (comments.list()):\n combined = combined + get_comments(comment)\n return combined\n elif isinstance(comments, list):\n return []\n else:\n print(type(comments))\n print(comments)", "def save_all(comments, filename):\n with open(filename, 
\"w\") as f:\n json.dump(comments, f)\n num = len(comments)\n size = os.path.getsize(filename)\n print(\"\\nDone. Written %d comments to file '%s' (%d bytes).\" %\n (num, filename, size))", "def __get_comments(self, root):\n comments_root = self.__expand_shadow_element_by_tag_name(root, 'mr-comment-list')\n\n list_of_comments = comments_root.find_elements_by_tag_name('mr-comment')\n print ('[*] %d comments' %len(list_of_comments))\n comments = []\n for c in list_of_comments:\n comment_root = self.__expand_shadow_element(c)\n comment_header = comment_root.find_element_by_css_selector('div>div').text.replace('\\n', ' ')\n \n m = re.match(self.comment_pattern, comment_header)\n blank_comment = { 'comment_id':'', 'comment_datetime':'', \n 'comment_author':'', 'comment_message': ' '} \n if m:\n comment_id = m.group(1).strip('\\n\\r ')\n if not 'Deleted' in comment_header:\n message_root = self.__expand_shadow_element_by_css_selector(comment_root, '.comment-body>mr-comment-content')\n lines = message_root.find_elements_by_css_selector('.line')\n\n comments.append({\n 'comment_id': comment_id,\n 'comment_datetime': m.group(4).strip('\\n\\r '),\n 'comment_author' : m.group(3).strip('\\n\\r '),\n 'comment_message': ' '.join([l.text.strip('\\n\\r ') for l in lines]) \n })\n else:\n blank_comment['comment_id'] = comment_id\n comments.append(blank_comment) \n else:\n comments.append(blank_comment) \n return comments", "def json(self):\n return ',\\n\\n'.join(s.json for s in self.samples)", "def GetJson(self):\n pretty_string = json.dumps(self.GetManifest(), indent=2)\n # json.dumps sometimes returns trailing whitespace and does not put\n # a newline at the end. This code fixes these problems.\n pretty_lines = pretty_string.split('\\n')\n return '\\n'.join([line.rstrip() for line in pretty_lines]) + '\\n'", "def to_json(self):\n return self.to_str(color=False, usetextmarker=True)", "def jsonify(text):\n\t#---remove comments because they might screw up the JSON\n\ttext = re.sub(r'([\\\"]{3}.*?[\\\"]{3})','\"REMOVED_BLOCK_COMMENT\"',text,flags=re.M+re.DOTALL)\n\t#---note that this fails if you use hashes inside of dictionary values\n\ttext = re.sub(r'(#.*?)\\n','',text,flags=re.M+re.DOTALL)\n\t#---strip trailing commas because they violate JSON rules\n\ttext = re.sub(r\",[ \\t\\r\\n]*}\",\"}\",text.replace(\"'\",\"\\\"\"))\n\t#---fix the case on all booleans\n\ttext = re.sub(\"True\",\"true\",text)\n\ttext = re.sub(\"false\",\"false\",text)\n\t#---! rpb is worried that this is a hack\n\treturn text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render a webpage out of the flattened comment data (Experimental)
def get_html(self): env = jinja2.Environment(loader=jinja2.PackageLoader('mane', 'templates')) template = env.get_template('basic.html') # Embed subreddit's css into the html page: style_info = self.reddit.subreddit("mylittlepony").stylesheet.__call__() subreddit_css = style_info.stylesheet images = style_info.images # substitute image urls for im in images: im_req = requests.get(im["url"]) mime_type = im_req.headers["content-type"] as_b64 = base64.b64encode(im_req.content) subreddit_css = subreddit_css.replace(im["link"], "url(data:%s;base64,%s)" % (mime_type, as_b64)) # in case not all authors were accounted for, map unknown authors to character "unknown" author_names = set(c["author"] for c in self.comment_data) default_author_map = dict((author, u"unknown") for author in author_names) author_map = default_author_map author_map.update(self.author_map) return template.render(unidecode=unidecode, time=time, subreddit_css=subreddit_css, author_map=author_map, characters=self.characters, comments=self.comment_data, title=self.thread_id)
[ "def crawl(thread_url):\n\tbase_url = \"https://np.reddit.com\"\n\tcomment_container = list()\n\treq = request.Request(base_url+thread_url, \n \tdata=None, \n \theaders={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n \t})\n\t\n\tcontent = request.urlopen(req).read()\n\tthread_name = thread_url.split(\"/\")[-2]+\".html\"\n\n\t# Saving as html\n\twith open(thread_name,\"w\") as txt:\t\n\t\ttxt.writelines(str(content))\n\n\t# Opening the html from disk\n\twith open(thread_name) as html:\n\t\tsoup = BeautifulSoup(html, \"html.parser\")\n\t\ts = soup.find_all(\"div\", {\"class\",\"content\"})\n\t\tif s:\n\t\t\ts = s[0].find_all(\"div\", id=lambda x: x and x.startswith('thing_t1_'))\n\t\t\tfor _s in s:\n\t\t\t\t# Getting the user that has posted the comment\n\t\t\t\tuser = _s[\"data-author\"]\n\t\t\t\t\n\t\t\t\t# Getting the text of the comment\n\t\t\t\ttext = _s.find(\"div\", {\"class\":\"md\"}).text\n\t\t\t\t# Need to do replacements to get the correct output\n\t\t\t\ttext = text.replace(\"\\\\xc3\\\\xa5\",\"å\").replace(\"\\\\xc3\\\\xb8\",\"ø\").replace(\"\\\\xc3\\\\xa6\",\"æ\")\n\t\t\t\t\n\t\t\t\t# Datetime for comment\t\t\t\n\t\t\t\ttime = _s.find(\"time\", {\"class\":\"live-timestamp\"})\n\t\t\t\ttime = time[\"datetime\"]\n\n\t\t\t\t# Link to comment\n\t\t\t\tlink = base_url+_s[\"data-permalink\"]\n\n\t\t\t\tcomment_container.append(Comment(user,text,time,link))\n\n\treturn comment_container", "def convert_comment_to_html(comment):\n author = db_helper.get_user_from_user_id(comment.author)\n comment_elem = comment_template.format(user_img=author.avatar_url,\n user_profile=url_for('user_profile', username=author.username),\n username=author.username,\n created=db_helper.get_readable_datetime(comment.created),\n content=comment.content,\n cmt_id=comment._id)\n return comment_elem", "def flatten(self, comment=None):\n\t\tprint 'flattening'\n\t\tif comment is None:\n\t\t\tprint 'comment is none'\n\t\t\tcomment = self.commentlist[0]\n\t\twhile isinstance(comment, praw.models.Comment):\n\t\t\tprint comment.body_html\n\t\t\tyield comment\n\t\t\tcomment = comment.replies[0]", "def _get_comment_text():\n comment_samples = [\n \"Malesu mauris nas lum rfusce vehicula bibend. Morbi.\",\n \"Nuncsed quamal felis donec rutrum class ipsumnam teger. Sedin metusd metusdo quamnunc utcras facilis nequen.\",\n \"Adipisci ent neque eger vehicula dis. Miquis auctorpr quamphas purusp phasel duifusce parturi. Ris liberoa ligula lacini risus nean. Arcualiq cubilia aenean nuncnunc ulum fringi uisque abitur rerit setiam. Nean miproin aliquet risusvi tempusp aliquete. Integer nequenu bulum ibulum laoree accumsan ellus mus odio uis. Amet curae ivamus congue aliquama liberofu que.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In justov volutpat mus habitas dapibusc nequenu volutp justo. Quam blandi tur maurisd egesta erossed morbi turpis risus tate. Lacusp facilis class vehicula varius iaculis setiam montes pharetra. Usce ecenas quispr naeos nec nibhphas lacinia roin. Abitur maurisma metusqui justop uscras llam enas. Magnaqu faucibus sduis arcualiq imperd teger egetlor teger.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Conseq tristiq enas duis sociosqu eduis enimsed tudin vel. Lus semnunc risusm nulla parturi atein at placerat. Tiam laut nibhnul turpisn vitaenul eleifen commodo euismo quat posuered. Egestas nullain justop maurisin purusp donec nas liberofu aptent. 
Nec aliquam tiam puruscra turpisp luctus proin. Lectusin turpisn usce orcivest nullam eget arcuduis tdonec min. Esent cursus vulput aenean bulum lacini congued pretiu. Portamor bulum tate isse llam cidunt estmae.\\n\\nSque leocras fusce nullap fusce convall laoreet nibhnull estsusp. Roin aliquet esent ctetur blandit etiam nequesed viverr. Nislqu sse orciduis lacusp in tasse gravida lla ullam. Itnunc id mauris rerit entum disse lacinia. Oin luctus velit musetiam onec potenti ipsump volutp. Tortor musetiam bibendum onec esent libero esque sim. Enas ras eclass placerat sedin risusut vulput enimdon montes. Rhoncus dolorma estsusp facilis etsed llaut esque cursus. Nisl ullamcor tincid llus nulla iaculis.\",\n ]\n return random.choice(comment_samples)", "def comments(thread_uid):\n thread = storage.get_thread(thread_uid, counter=True)\n if not thread:\n abort(404)\n return render_template('comments.html',\n comments=storage.list_comments(thread_uid),\n thread=thread,\n thread_uid=thread_uid)", "def show_comments(self, page):\n the_comments = WikiComment.comments_on_page(page)\n #the_comments = lazy_iter(sort_comments, the_comments)\n the_comments = sort_comments(the_comments)\n self.template_value['comments'] = the_comments", "def webpage(graph, head=\"\", style=\"\", body=(\"\",\"\"), **kwargs):\n s1 = render(graph, type=STYLE, **kwargs)\n s2 = render(graph, type=CANVAS, **kwargs)\n # Fix HTML source indentation:\n # f1 = indent each line\n # f2 = indent first line\n f1 = lambda s, t=\"\\t\": s.replace(\"\\n\",\"\\n\"+t)\n f2 = lambda s, t=\"\\t\": (\"\\n%s%s\" % (t,s.lstrip())).rstrip()\n return template % (\n f2(head), f1(s1), f2(style, \"\\t\\t\"), f1(body[0]), f1(\"\\n\"+s2), f2(body[1]))", "def reddit_page_handler(url):\n\tpayload = urllib2.urlopen(url).read()\n\tpayload = json.loads(payload)\n\tcomment_pages = []\n\tfor story in payload['data']['children']:\n\t\tstory = story['data']\n\t\tcomment_url = story['permalink']\n\t\tcomment_pages.append(comment_url)\n\treturn (comment_pages,payload['data']['after'])", "def graph():\n return render_template('main/graph.html')", "async def format_nested_comments(\n db: AsyncSession,\n *,\n comments: List[Comment],\n permalink: str,\n user: Optional[User] = None,\n ) -> CommentListOut:\n\n users, comment_actions = await crud.comment.fetch_comments_data(\n db, comments=comments, user=user\n )\n\n l1_index = {}\n l1_comments = []\n for comment in comments:\n if permalink is None:\n content_link = ContentInBase().generate_permalink(\n comment.content.permalink, comment.content.id\n )\n else:\n content_link = permalink\n\n if comment.l1_id:\n (l1_index, l1_comments,) = await crud.comment.format_single_comment(\n comment,\n level=1,\n index=l1_index,\n permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l1_comments,\n schema=CommentL1Out,\n )\n l1_index_obj = l1_index[comment.l1_id]\n\n if comment.l2_id:\n l2_index = l1_index_obj[\"child_index\"]\n l2_comments = l1_comments[l1_index_obj[\"list_id\"]].comments\n\n (l2_index, l2_comments,) = await crud.comment.format_single_comment(\n comment,\n level=2,\n index=l2_index,\n permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l2_comments,\n schema=CommentL2Out,\n )\n l2_index_obj = l2_index[comment.l2_id]\n\n if comment.l3_id:\n l3_index = l2_index_obj[\"child_index\"]\n l3_comments = l2_comments[l2_index_obj[\"list_id\"]].comments\n\n await crud.comment.format_single_comment(\n comment,\n level=3,\n index=l3_index,\n 
permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l3_comments,\n schema=CommentL3Out,\n )\n\n l1_total = comments[0].l1_total if comments else 0\n master_comments_out = CommentListOut(\n comments=l1_comments, comments_total=l1_total\n )\n return master_comments_out", "def add_hypercomments(soup: BeautifulSoup, file_path: str, comments: Typings.HyperComments):\n\n def generate_comment_html(c: Dict[str, str]) -> str:\n return f\"\"\"\n<div class=\"hc-comment\">\n <img src=\"{c['avatar']}\" class=\"hc-avatar\">\n <div class=\"hc-content\">\n <div class=\"hc-header\">{c['title']}</div>\n <div class=\"hc-subheader\">\n <h3 class=\"hc-author\">{c['name']}</h3>\n <div class=\"hc-date\">{c['date'].replace(\"T\", \" \")}</div>\n </div>\n <p class=\"hc-quote\">{c['parent_text'] or '%REMOVE-EMPTY%'}</p>\n <p class=\"hc-text\">{c['text']}</p>\n </div>\n</div>\n \"\"\".replace('<p class=\"hc-quote\">%REMOVE-EMPTY%</p>', '')\n\n file_name_uri = extract_article_uri(file_path)\n comments_block = soup.select_one('.kmt-list')\n has_hc_comments = False\n if comments_block is not None:\n for comment in comments:\n if comment['uri'] is None:\n continue\n if comment['uri'] == file_name_uri:\n has_hc_comments = True\n add_children(soup, '.kmt-list', generate_comment_html(comment), 'li', {})\n if has_hc_comments:\n remove_element(soup, \".kmt-empty-comment\")", "def format_comments(self, contents):\n comment_template, reply_template = self.format_templates()\n comments = []\n for i, comment in enumerate(contents):\n comment['num'] = i + 1\n comments.append(comment_template.format(**comment))\n for j, reply in enumerate(comment['replies']):\n reply['num'] = j + 1\n if 'content' not in reply.keys():\n reply['content'] = ''\n comments.append(reply_template.format(**reply))\n comments.append('\\n\\n')\n\n return comments", "def scrape_comments(subreddit_list,verbose=True):\n r = praw.Reddit('Test by u/_Daimon_')\n X = []\n y = []\n for i, subreddit in enumerate(subreddit_list):\n comments = r.get_subreddit(subreddit).get_comments(limit=None)\n count=0\n for c in comments:\n \t X.append(c.body) \n\t y.append(i+1)\n\t count+=1\n\tif verbose:\n print '\\n%i comments from subreddit: %s fetched!'%(count,subreddit)\n return X, np.array(y).astype('int')", "def comments_to_dicts(self, comments):\n\t\tlist_of_dicts = [{ \"author\": c.author.name, \"body_html\":c.body_html, \n\t\t\"created_utc\":c.created_utc, \"permalink\":c.permalink(True) } for c in comments]\n\t\treturn list_of_dicts", "def render(view=False, preview=False):", "def render(self, user):\n self._render_text = self.content.replace('\\n', '<br>')\n return render_str(\"post.html\", p=self, user=user)", "def render(self, data):\n pass", "def comments_splitting(self, df):\n #df = df.apply(lambda x : text_preprocessing(x))\n good_reviews = df[df.label == \"POSITIVE\"]\n bad_reviews = df[df.label == \"NEGATIVE\"]\n #print(good_reviews)\n good_reviews_text = \" \".join(good_reviews.comments.to_numpy().tolist())\n bad_reviews_text = \" \".join(bad_reviews.comments.to_numpy().tolist())\n good_reviews_cloud = WordCloud(stopwords=STOPWORDS, background_color=\"white\").generate(good_reviews_text)\n bad_reviews_cloud = WordCloud(stopwords=STOPWORDS, background_color=\"white\").generate(bad_reviews_text)\n #print(good_reviews_cloud, bad_reviews_cloud)\n \n show_word_cloud(good_reviews_cloud, 'good comments')\n show_word_cloud(bad_reviews_cloud, 'bad comments')", "def _render_comment_diff(self, req, ticket, data, cnum):\n 
req.perm(ticket.resource).require('TICKET_VIEW')\n new_version = int(req.args.get('version', 1))\n old_version = int(req.args.get('old_version', new_version))\n if old_version > new_version:\n old_version, new_version = new_version, old_version\n elif old_version == new_version:\n old_version = new_version - 1\n\n history = {}\n for change in self._get_comment_history(req, ticket, cnum):\n history[change['version']] = change\n\n def version_info(version):\n path = _(\"Ticket #%(num)s, comment %(cnum)d\",\n num=ticket.id, cnum=cnum)\n if version:\n rev = _(\"Version %(num)s\", num=version)\n shortrev = 'v%d' % version\n else:\n rev, shortrev = _(\"Initial Version\"), _(\"initial\")\n return {'path': path, 'rev': rev, 'shortrev': shortrev}\n\n diff_style, diff_options, diff_data = get_diff_options(req)\n diff_context = 3\n for option in diff_options:\n if option.startswith('-U'):\n diff_context = int(option[2:])\n break\n if diff_context < 0:\n diff_context = None\n\n def get_text(version):\n try:\n text = history[version]['value']\n return text.splitlines() if text else []\n except KeyError:\n raise ResourceNotFound(_(\"No version %(version)d for comment \"\n \"%(cnum)d on ticket #%(ticket)s\",\n version=version, cnum=cnum,\n ticket=ticket.id))\n\n old_text = get_text(old_version)\n new_text = get_text(new_version)\n diffs = diff_blocks(old_text, new_text, context=diff_context,\n ignore_blank_lines='-B' in diff_options,\n ignore_case='-i' in diff_options,\n ignore_space_changes='-b' in diff_options)\n\n changes = [{'diffs': diffs, 'props': [],\n 'new': version_info(new_version),\n 'old': version_info(old_version)}]\n\n # -- prev/up/next links\n prev_version = old_version\n next_version = None\n if new_version < len(history) - 1:\n next_version = new_version + 1\n\n if prev_version:\n url = req.href.ticket(ticket.id, cnum=cnum, action='comment-diff',\n version=prev_version)\n add_link(req, 'prev', url, _(\"Version %(num)s\", num=prev_version))\n add_link(req, 'up', req.href.ticket(ticket.id, cnum=cnum,\n action='comment-history'),\n _(\"Ticket Comment History\"))\n if next_version:\n url = req.href.ticket(ticket.id, cnum=cnum, action='comment-diff',\n version=next_version)\n add_link(req, 'next', url, _(\"Version %(num)s\", num=next_version))\n\n prevnext_nav(req, _(\"Previous Change\"), _(\"Next Change\"),\n _(\"Ticket Comment History\"))\n add_stylesheet(req, 'common/css/diff.css')\n add_script(req, 'common/js/diff.js')\n\n data.update({\n 'title': _(\"Ticket Comment Diff\"),\n 'resource': ticket.resource,\n 'name': _(\"Ticket #%(num)s, comment %(cnum)d\",\n num=ticket.id, cnum=cnum),\n 'url': self._make_comment_url(req, ticket, cnum),\n 'old_url': self._make_comment_url(req, ticket, cnum, old_version),\n 'new_url': self._make_comment_url(req, ticket, cnum, new_version),\n 'diff_url': req.href.ticket(ticket.id, cnum=cnum,\n action='comment-diff',\n version=new_version),\n 'diff_action': 'comment-diff', 'diff_args': [('cnum', cnum)],\n 'old_version': old_version, 'new_version': new_version,\n 'changes': changes, 'diff': diff_data,\n 'num_changes': new_version - old_version,\n 'change': history[new_version],\n 'ticket': ticket, 'cnum': cnum,\n 'longcol': '', 'shortcol': ''\n })\n\n return 'diff_view.html', data, None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return names for all the nodes in the chain.
def _get_all_nodes(action_chain): all_nodes = [node.name for node in action_chain.chain] return all_nodes
[ "def names(self):\n return {node for node in self.graph.nodes if self.name_is_valid(node)}", "def getAllNodesNames(self, startingNode=None):\n if startingNode is not None:\n snode = self.grid.find(startingNode)\n returnNames = [node.name for node in snode.iter()]\n else:\n returnNames = self.mappingLevelName.values()\n\n return returnNames", "def get_nodes(self):\n\t\treturn node_names(self.network)", "def node_names():\n return list(node_mappings.keys())", "def nodes(self):\n # return [k for k in self.agents]\n return self.name_list", "def find_names(self, node):\n\n self.names = set()\n\n self.visit(node)\n\n return self.names", "def get_nodes(self):\n return set(self._names)", "def names(self, repo, node):\n return sorted(self.nodemap(repo, node))", "def get_left_node_names(self):\n return set({node.get_name() for node in self.get_left_nodeset()}) # return the set of names", "def child_names(self) -> List[str]:\n return [t.name for t in self.children]", "def get_right_node_names(self):\n return set({node.get_name() for node in self.get_right_nodeset()}) # return the set of names", "def get_node_set_names(self):\n nodeSetNames = self.__ex_get_names('EX_NODE_SET')\n return nodeSetNames", "def parent_names(self) -> List[str]:\n return [t.name for t in self.parents]", "def fetch_nodes():\n nodes = []\n all_names = []\n\n for node in nuke.allNodes(group=nuke.root()):\n if node.Class() in OUTPUT_CLASSES:\n nodes.append(node)\n\n all_names.append(node.name())\n\n return nodes, all_names", "def get_names(self):\n return self.clusters.keys()", "def nodes_to_unames(G, all_nodes):\n return [[G.nodes[n]['uname'] for n in k] for k in all_nodes]", "def _get_node_names(self, pipeline: dict, node_id_list: list) -> List:\n node_name_list = []\n pipeline_json = json.loads(json.dumps(pipeline))\n for node_id in node_id_list:\n found = False\n for single_pipeline in pipeline_json[\"pipelines\"]:\n for node in single_pipeline[\"nodes\"]:\n if node[\"id\"] == node_id:\n node_name_list.append(self._get_node_label(node))\n found = True\n break\n if found:\n break\n\n return node_name_list", "def get_children_names(self):\n children_names = self._state.children_names\n return children_names", "def names(self):\n\t\treturn self.store().names()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function which validates that the provided node name is defined in the workflow definition and is valid. Keep in mind that we can only perform validation for task names which don't include Jinja expressions, since those are rendered at run time.
def _is_valid_node_name(self, all_node_names, node_name): if not node_name: # This task name needs to be resolved during run time so we cant validate the name now return True is_jinja_expression = jinja_utils.is_jinja_expression(value=node_name) if is_jinja_expression: # This task name needs to be resolved during run time so we cant validate the name # now return True return node_name in all_node_names
[ "def is_valid_node_name(name):\n return utils.is_hostname_safe(name) and (not uuidutils.is_uuid_like(name))", "def is_valid_workflow_name(name):\n return bool(re.match('(?s)^[a-zA-Z][a-zA-Z0-9_]*$',name))", "def validate_task_definition(taskdef_name, version):\n exit_if_none(taskdef_name, \"Missing task definition name\")\n if version:\n taskdef_name = f\"{taskdef_name}:{version}\"\n try:\n # ECS throws if it can't find a task definition\n taskdef = boto3.client('ecs').describe_task_definition(taskDefinition=taskdef_name).get('taskDefinition')\n return taskdef['taskDefinitionArn']\n except:\n return exit_if_none(None, f\"can't find task definition: {taskdef_name}\")", "def _validate_task_def(\n cls, task_name: str, task_def: Dict[str, Any], config: \"PoeConfig\"\n ) -> Optional[str]:\n issue = None\n return issue", "def validate_def(\n cls, task_name: str, task_def: TaskDef, config: \"PoeConfig\"\n ) -> Optional[str]:\n if not (task_name[0].isalpha() or task_name[0] == \"_\"):\n return (\n f\"Invalid task name: {task_name!r}. Task names must start with a letter\"\n \" or underscore.\"\n )\n elif not _TASK_NAME_PATTERN.match(task_name):\n return (\n f\"Invalid task name: {task_name!r}. Task names characters must be \"\n \"alphanumeric, colon, underscore or dash.\"\n )\n elif isinstance(task_def, dict):\n task_type_keys = set(task_def.keys()).intersection(cls.__task_types)\n if len(task_type_keys) == 1:\n task_type_key = next(iter(task_type_keys))\n task_content = task_def[task_type_key]\n task_type = cls.__task_types[task_type_key]\n if not isinstance(task_content, task_type.__content_type__):\n return (\n f\"Invalid task: {task_name!r}. {task_type} value must be a \"\n f\"{task_type.__content_type__}\"\n )\n else:\n for key in set(task_def) - {task_type_key}:\n expected_type = cls.__base_options.get(\n key, task_type.__options__.get(key)\n )\n if expected_type is None:\n return (\n f\"Invalid task: {task_name!r}. Unrecognised option \"\n f\"{key!r} for task of type: {task_type_key}.\"\n )\n elif not isinstance(task_def[key], expected_type):\n return (\n f\"Invalid task: {task_name!r}. Option {key!r} should \"\n f\"have a value of type {expected_type!r}\"\n )\n else:\n if hasattr(task_type, \"_validate_task_def\"):\n task_type_issue = task_type._validate_task_def(\n task_name, task_def, config\n )\n if task_type_issue:\n return task_type_issue\n if \"\\n\" in task_def.get(\"help\", \"\"):\n return (\n f\"Invalid task: {task_name!r}. Help messages cannot contain \"\n \"line breaks\"\n )\n else:\n return (\n f\"Invalid task: {task_name!r}. Task definition must include exactly\"\n f\" one task key from {set(cls.__task_types)!r}\"\n )\n return None", "def _process_node_name(scope, node_type: str, node_id: str, full_name=None):\n if node_type == \"Load\":\n # The Load operator needs to be renamed as it has the same name with parameter\n node_name = f'{scope}/{node_type}-op{node_id}'\n elif not full_name or \\\n any(full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n # process summary node\n node_name = f'{scope}/{node_type}-op{node_id}'\n else:\n node_name = full_name\n\n return node_name", "def test_node_template_requirements_with_wrong_node_keyname(self):\n tpl_snippet = '''\n node_templates:\n mysql_database:\n type: tosca.nodes.Database\n requirements:\n - host:\n nodes: mysql_dbms\n\n '''\n expectedmessage = _('\"requirements\" of template \"mysql_database\" '\n 'contains unknown field \"nodes\". 
Refer to the '\n 'definition to verify valid values.')\n err = self.assertRaises(\n exception.UnknownFieldError,\n lambda: self._single_node_template_content_test(tpl_snippet))\n self.assertEqual(expectedmessage, err.__str__())", "def _assert_all_graph_nodes_in_name_scope(self, graph, name):\n for node in graph.as_graph_def().node:\n self.assertIn(name, node.name)", "def validateTaskName(csvTaskname, logTaskName):\r\n if EVENT_DICT[csvTaskname.strip()] == logTaskName.strip():\r\n return True\r\n else:\r\n return False", "def validate_graph_name(graph_name):\n if graph_name in GRAPH_NAMES:\n return\n raise UserError((\"The status of %s is not valid. It must be one of %s\" %\n (graph_name, \", \".join(GRAPH_NAMES))))", "def _validate_generic_node_properties(self, node: Node, response: ValidationResponse, pipeline_runtime: str):\n node_label = node.label\n image_name = node.get_component_parameter(\"runtime_image\")\n filename = node.get_component_parameter(\"filename\")\n dependencies = node.get_component_parameter(\"dependencies\")\n env_vars = node.get_component_parameter(\"env_vars\")\n\n self._validate_filepath(\n node_id=node.id, node_label=node_label, property_name=\"filename\", filename=filename, response=response\n )\n\n # If not running locally, we check resource and image name\n if pipeline_runtime != \"local\":\n self._validate_container_image_name(node.id, node_label, image_name, response=response)\n for resource_name in [\"cpu\", \"gpu\", \"memory\"]:\n resource_value = node.get_component_parameter(resource_name)\n if resource_value:\n self._validate_resource_value(\n node.id,\n node_label,\n resource_name=resource_name,\n resource_value=resource_value,\n response=response,\n )\n\n self._validate_label(node_id=node.id, node_label=node_label, response=response)\n if dependencies:\n notebook_root_relative_path = os.path.dirname(filename)\n for dependency in dependencies:\n self._validate_filepath(\n node_id=node.id,\n node_label=node_label,\n file_dir=os.path.join(self.root_dir, notebook_root_relative_path),\n property_name=\"dependencies\",\n filename=dependency,\n response=response,\n )\n if env_vars:\n for env_var in env_vars:\n self._validate_environmental_variables(node.id, node_label, env_var=env_var, response=response)", "def test_node_template_with_wrong_requirements_keyname(self):\n tpl_snippet = '''\n node_templates:\n mysql_dbms:\n type: tosca.nodes.DBMS\n properties:\n root_password: aaa\n port: 3376\n requirement:\n - host: server\n '''\n expectedmessage = _('Node template \"mysql_dbms\" contains unknown '\n 'field \"requirement\". 
Refer to the definition to '\n 'verify valid values.')\n err = self.assertRaises(\n exception.UnknownFieldError,\n lambda: self._single_node_template_content_test(tpl_snippet))\n self.assertEqual(expectedmessage, err.__str__())", "def is_node_required(self, node_display_name: str) -> bool:\n node_label = self.get_node_label(node_display_name)\n\n mm_graph = self.se.get_nx_schema()\n node_required = mm_graph.nodes[node_label][\"required\"]\n\n return node_required", "def validate_name(name: str) -> None:\n if name in settings.TEAM_NAMES:\n return None\n\n raise ValidationError(_(\"%(name)s is not a valid team name\"), params={\"name\": name})", "def test_empty_task_name_is_invalid(user):\n with pytest.raises(ValueError):\n task = Task(\"\", True, user)", "def name_validation(name):\n if len(name) > 244:\n LOGGER.error(\"cluster-name is too long\")\n sys.exit(2)\n allowed = re.compile(r\"^[a-zA-Z\\d-]+$\")\n if not allowed.match(name):\n LOGGER.error(\"cluster-name '%s' is using illegal characters.\"\n \"Please change cluster-name in config file\", name)\n sys.exit(2)\n return name", "def name_is_valid(self, name):\n if isinstance(name, str):\n return not name.endswith(('_worker', '_localCollector', '_globalCollector'))\n else:\n return False", "def _check_node(self, node_path):\n if self.auto_groups:\n path, name = self.split_path(node_path)\n self._create_required_groups(path)\n\n if node_path in self:\n if self.delete_existing:\n if isinstance(self[node_path], H5Group):\n self.remove_group(node_path, recursive=True)\n else:\n self.remove_node(node_path)\n else:\n msg = self.exists_error.format(node_path, self.filename)\n raise ValueError(msg)", "def validate(self, data):\n node = data[\"node\"]\n creator = data[\"creator\"]\n\n valid_node_types = (\"Article\", \"Review\")\n if \"type\" not in node.json or node.json[\"type\"] not in valid_node_types:\n raise exceptions.ValidationError(\n dict(node=f\"Node type must be one of {', '.join(valid_node_types)}.\")\n )\n\n project = node.project\n if not project:\n raise exceptions.ValidationError(\n dict(node=\"Node must be linked to a project.\")\n )\n\n role = get_projects(creator).get(id=project.id).role\n if role not in [\"MANAGER\", \"OWNER\"]:\n raise exceptions.PermissionDenied(\n \"You need to be the project manager or owner to create a DOI for it.\"\n )\n\n AccountQuotas.DOIS_CREATED_MONTH.check(project.account)\n\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format ActionExecution result so it can be used in the final action result output.
def _format_action_exec_result( self, action_node, liveaction_db, created_at, updated_at, error=None ): if not isinstance(created_at, datetime.datetime): raise TypeError( f"The created_at is not a datetime object was({type(created_at)})." ) if not isinstance(updated_at, datetime.datetime): raise TypeError( f"The updated_at is not a datetime object was({type(updated_at)})." ) result = {} execution_db = None if liveaction_db: execution_db = ActionExecution.get(liveaction__id=str(liveaction_db.id)) result["id"] = action_node.name result["name"] = action_node.name result["execution_id"] = str(execution_db.id) if execution_db else None result["liveaction_id"] = str(liveaction_db.id) if liveaction_db else None result["workflow"] = None result["created_at"] = isotime.format(dt=created_at) result["updated_at"] = isotime.format(dt=updated_at) if error or not liveaction_db: result["state"] = action_constants.LIVEACTION_STATUS_FAILED else: result["state"] = liveaction_db.status if error: result["result"] = error else: result["result"] = liveaction_db.result return result
[ "def _format_result(result: CheckResult) -> str:\n builder = StringBuilder()\n\n if result.success:\n builder.add(\n Color.format(\n '[check][{}][end] ... [pass]{}[end]'.format(\n result.config.check_type, result.status.upper()\n )\n )\n )\n else:\n if result.status == STATUS_FAIL:\n builder.add(\n Color.format(\n '[check][{}][end] ... [fail]{}[end]'.format(\n result.config.check_type, result.status.upper()\n )\n )\n )\n else:\n builder.add(\n Color.format(\n '[check][{}][end] ... [error]{}[end]'.format(\n result.config.check_type, result.status.upper()\n )\n )\n )\n builder.add(\n Color.format('[h]Error code[end]: {}'.format(result.error_code))\n )\n builder.add(Color.format('[h]Details[end]:'))\n builder.add(pyaml.dump(result.details))\n builder.add()\n\n return builder.render()", "def format_result(result):\n if not isinstance(result, tuple):\n return ActionResult(result, None)\n else:\n return ActionResult(*result)", "def format(self, result, ostream):\n raise NotImplementedError", "def format(self, result=None):\n self.log_method_enter(method=self.current_method)\n self.log.debug(msg=\"trying to format result for output\")\n self.log.debug(msg=\"instantiating {} object with options {}\"\n .format(self.format_class.__name__,\n self.format_options))\n with self.format_class(**vars(self.format_options)) as f:\n output = f.format(result=result)\n self.log_method_exit(method=self.current_method)\n return output", "def format_code_output(self, result: str) -> Embed:\n logger.info(\"Formatting message output...\")\n\n zero = \"\\N{zero width space}\"\n result = re.sub(\"```\", f\"{zero}`{zero}`{zero}`{zero}\", result)\n result, exit_code = result.split(\"Exit code: \")\n icon = self.get_icon(exit_code)\n result = result.rstrip(\"\\n\")\n lines = result.count(\"\\n\")\n\n if lines > 0:\n result = [\n f\"{i:02d} | {line}\" for i, line in enumerate(result.split(\"\\n\"), 1)\n ]\n result = result[: self.max_lines] # Limiting to only 11 lines\n result = \"\\n\".join(result)\n\n embed = self.embed_helper(\n description=f\"{icon} Your {self.language} eval job has completed with return code `{exit_code}`.\",\n field=f\"```\\n{'[No output]' if result == '' else result}```\",\n )\n\n logger.info(\"Output Formatted\")\n return embed", "def transform_result(task_ex, result):\n if result.is_error():\n return result\n\n action_spec_name = spec_parser.get_task_spec(\n task_ex.spec).get_action_name()\n\n if action_spec_name:\n wf_ex = task_ex.workflow_execution\n wf_spec_name = spec_parser.get_workflow_spec(wf_ex.spec).get_name()\n\n return transform_action_result(\n wf_ex.workflow_name,\n wf_spec_name,\n action_spec_name,\n result\n )\n\n return result", "def get_request_result(self):\r\n\r\n return format_result(self.get_request_result_raw())", "def display_execution_result(\n context: ExecutionContext, event: events.AfterExecution, warnings: FrozenSet[str]\n) -> None:\n symbol: str\n color: str\n if event.status == Status.success:\n symbol, color = \".\", \"green\"\n elif event.result.checks and warn_or_success(event.result, warnings):\n symbol, color = \"W\", \"yellow\"\n elif event.status == Status.failure:\n symbol, color = \"E\", \"red\"\n else:\n # an exception occurred\n symbol, color = \"E\", \"magenta\"\n context.current_line_length += len(symbol)\n click.secho(symbol, nl=False, fg=color)", "def __update_result_file(self):\n try:\n test_failure_reason = \"\"\n for key in self.result_dict:\n tcreason = self.result_dict[key]\n tc_id = self.tc_id + \"_\" + key\n if tcreason:\n tcstatus = 
\"FAIL\"\n message = \"Test Case ID: %s\" % tc_id + \"\\nTest Case\"\\\n \" Status: %s\" % tcstatus + \"\\nFailure \"\\\n \"Reason: %s\" % tcreason\n decorated_msg = self.common.get_decorated_message(\n message, \"-\", 70)\n LOG_OBJ.info(decorated_msg)\n print decorated_msg\n if tcreason not in test_failure_reason:\n test_failure_reason += tcreason\n else:\n tcstatus = \"PASS\"\n message = \"Test Case ID: %s\" % tc_id + \"\\nTest Case\"\\\n \" Status: %s\" % tcstatus\n decorated_msg = self.common.get_decorated_message(\n message, \"-\", 70)\n LOG_OBJ.info(decorated_msg)\n\n tcstatus = 'FAIL' if test_failure_reason else \"PASS\"\n # During stress testing don't update result file.\n if \"main\" not in threading.currentThread().getName().lower():\n StressTestHelper().stress_test_result_update(\n self.tc_id, tcstatus, test_failure_reason)\n return\n self.common.test_result_update(\n self.tc_id, tcstatus, test_failure_reason)\n except Exception as err:\n LOG_OBJ.exception(err)\n return \"Exception occurred while updating test result\"\\\n \" in result file.\"", "def dump(self, result: Result) -> str:\n return json.dumps(asdict(result))", "def table_of_result(self, result):\n rows = []\n def add(label, lst, style):\n for test, backtrace in lst:\n rows.append([label, result.getDescription(test), style])\n add(CHECK, result.successes, u'unittest-success')\n add(CROSS, result.failures, u'unittest-failure')\n add(HEAVY_CROSS, result.errors, u'unittest-error')\n add(SKIP, result.skipped, u'unittest-skipped')\n add(CHECK, result.expectedFailures, u'unittest-success')\n add(CROSS, result.unexpectedSuccesses, u'unittest-failure')\n bd = u'\\n'.join([u'<p class=\"unittest-test {}\">{}<span class=\"unittest-name\">{}</span></p>'.format(row[2], row[0], row[1]) for row in rows])\n return u'{}'.format(bd)", "def format_results(self, json_results, xml_results):\n\n formatted_results = dict()\n formatted_results[\"rc\"] = self.last_http_return_code\n formatted_results[\"http_metadata\"] = {\n \"status\": {\n \"code\": self.last_http_return_code,\n \"message\": FSM_RC[\"fsm_return_codes\"][formatted_results[\"rc\"]][\"msg\"]\n },\n \"url\": self.last_http_return_url,\n\n }\n # IF HEADERS ARE PRESENT, TRY TO ADD THEM\n try:\n formatted_results[\"http_metadata\"][\"headers\"] = self.last_http_return_headers\n except BaseException as err:\n pass\n\n # ADD THE RESULTS\n try:\n if json_results:\n formatted_results[\"json_results\"] = json_results\n else:\n formatted_results[\"json_results\"] = None\n except BaseException as err:\n pass\n # ADD THE XML RESULTS\n try:\n if xml_results:\n formatted_results[\"xml_results\"] = xml_results\n else:\n formatted_results[\"xml_results\"] = None\n except BaseException as err:\n pass\n return formatted_results", "def _export_result_dict(context, steps=None, messages=None):\n return {'steps': steps,\n 'messages': messages,\n 'tarball': context.getArchive(),\n 'filename': context.getArchiveFilename()}", "def summariseResult(self, test):\n assert not test.isSuite\n if self.mode not in [\"FAIL-SUMMARY\"]:\n return\n result = test.result\n itemType = \"test\"\n if test.isSuite: ## TODO???\n itemType = \"suite\"\n lines, textLen, dotLen = self.formatAnnouncement(test.summary,\n number=test.number, testID=test.testID)\n text = \"\\n\".join(lines)\n writer = sNormal.write\n writer(\"%s%s%12s\\n\" % (text, \".\" * dotLen, result.state))", "def save_result(self, results):\n combined_stdout = u\"\"\n combined_return_code = 0\n\n for result in results:\n cmd = result[\"cmd\"]\n if not 
isinstance(cmd, str):\n cmd = \" \".join(cmd)\n\n if hasattr(cmd, \"decode\"):\n # This is needed in Python 2 to allow unicode paths\n cmd = cmd.decode(\"utf-8\")\n\n combined_stdout += u\"Running: {0}\\n\".format(cmd)\n\n if result[\"stdout\"]:\n combined_stdout += result[\"stdout\"]\n\n if result[\"return_code\"] != 0:\n # Save the last non-zero return code\n combined_return_code = result[\"return_code\"]\n\n combined_result = (\n self.format_command(self.inputs[0]),\n combined_return_code,\n combined_stdout,\n )\n\n testlock.acquire()\n try:\n bld = self.generator.bld\n Logs.debug(u\"wr: %r\", result)\n\n if hasattr(bld, \"runner_results\"):\n bld.runner_results.append(combined_result)\n else:\n bld.runner_results = [combined_result]\n\n finally:\n testlock.release()", "def dumps_result(self, result: BaseResult) -> str:\n try:\n # Dumps result without any check. Catch exception to handle dump errors\n dumped_result = self.marshaller.dumps(result.serializable_data())\n except Exception as exc:\n # Error on result serialization: result become an error...\n error_msg = f\"Unable to serialize result: {exc}\"\n logger.error(error_msg, exc_info=settings.MODERNRPC_LOG_EXCEPTIONS)\n error_result = XmlErrorResult(RPC_INTERNAL_ERROR, error_msg)\n dumped_result = self.marshaller.dumps(error_result.serializable_data())\n\n # Finally, dumps the result into full response content\n final_content = (\n \"\"\"\n <?xml version=\"1.0\"?>\n <methodResponse>\n %s\n </methodResponse>\n \"\"\"\n % dumped_result\n )\n return dedent(final_content).strip()", "def summariseSuiteResult(self, suite):\n try:\n if self.mode not in [\"FAIL-SUMMARY\"]:\n return\n result = suite.result\n itemType = \"suite\"\n lines, textLen, dotLen = self.formatAnnouncement(suite.summary)\n text = \"\\n\".join(lines)\n writer = sNormal.write\n writer(\"%s%s%12s\\n\" % (text, \".\" * dotLen, result.state))\n finally:\n self.level += 1", "def format(self, result, read):\n raise NotImplementedError()", "def log_result(self, result):\n dryrun_string = ''\n if result['dry_run']:\n dryrun_string = 'DRY-RUN: '\n self.loggit.debug('{0}Result: {1}'.format(dryrun_string, result))\n rollover_string = '{0}Old index {1} rolled over to new index {2}'.format(\n dryrun_string,\n result['old_index'],\n result['new_index']\n )\n # Success is determined by at one condition being True\n success = False\n for k in list(result['conditions'].keys()):\n if result['conditions'][k]:\n success = True\n if result['dry_run'] and success: # log \"successful\" dry-run\n self.loggit.info(rollover_string)\n elif result['rolled_over']:\n self.loggit.info(rollover_string)\n else:\n self.loggit.info(\n '{0}Rollover conditions not met. Index {0} not rolled over.'.format(\n dryrun_string,\n result['old_index'])\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the contributors and languages for the repo
def _get_repo_contributors_and_languages(self, repo) -> dict: print(f"start getting contributors and languages for {repo.name}") languages = self.service.get_languages(repo) contributors = self.service.get_contributors(repo) return { "users": contributors, "repo": repo.name, "languages": languages, }
[ "def getAllContributors(server,repo):\n contributors={}\n url=server+\"/repos/\"+repo+\"/stats/contributors\"\n res=conn.get(url)\n dicres=json.loads(res.text)\n for contributor in dicres:\n additionDeletion=getAdditionsDeletions(contributor.get(\"weeks\"))\n additions=str(additionDeletion[0])\n deletions=str(additionDeletion[1])\n commits=str(contributor.get(\"total\"))\n #contributor will be -> author_id:(commit,additions,deletions)\n contributors[str(contributor.get(\"author\").get(\"id\"))]=(commits,additions,deletions)\n return contributors", "def getAllDevelopers(server,repo):\n nameDevelopers=[]\n #Get all contributors of repository\n url=server+\"/repos/\"+repo+\"/stats/contributors\"\n res=conn.get(url)\n dicres=json.loads(res.text)\n for developer in dicres:\n nameDevelopers.append(developer.get(\"author\").get(\"login\"))\n return nameDevelopers", "def contributors(self) -> Tuple[str, ...]:\n enforce(self.is_set(\"contributors\"), \"'contributors' content is not set.\")\n return cast(Tuple[str, ...], self.get(\"contributors\"))", "def get_repo(self,args):\n\t\trepo_list=[]\n\t\tif(args.repo == 'all'):\n\t\t\trepo_list = [repo.name for repo in self.organization.get_repos()]\n\t\telse:\n\t\t\trepo_list = [args.repo]\n\n\t\treturn repo_list", "def contributors(self):\n if not self._contributors:\n self._contributors = self.get_contributors()\n return self._contributors", "def getGitData(username):\n\n # fetch access token for given username\n conn = create_connection('test.db')\n query = f\"SELECT token from Token WHERE g_username='{username}';\"\n result = execute_read_query(conn, query)\n token = (result[0])[0]\n \n # appropriate header for GitHub API '/usr' endpoint\n headers = {'Authorization': f\"token {token}\"}\n usrUrl = \"https://api.github.com/user\"\n res = requests.get(url=usrUrl, headers=headers)\n res = res.json()\n\n # fetch required details from response\n response = {}\n response['id'] = res['login']\n response['followers'] = res['followers']\n response['public_repos'] = res['public_repos']\n\n # request for fetching repository details\n repoUrl = f\"https://api.github.com/users/{username}/repos\"\n res = requests.get(url=repoUrl, headers=headers)\n repo_data = res.json()\n\n # store all repository details in lst\n lst = []\n stars = 0\n languages = {}\n for repo in repo_data:\n obj = {}\n obj['name'] = repo['name']\n obj['stars'] = repo['stargazers_count']\n obj['language'] = repo['language']\n obj['description'] = repo['description']\n obj['forks_count'] = repo['forks_count']\n\n key = repo['language']\n if key is not None:\n key = str(repo['language'])\n if key in languages:\n languages[key] += 1\n else:\n languages[key] = 0\n stars += obj['stars']\n lst.append(obj)\n\n # sort all repos on number of stars\n def func(item): return item[1]\n languages_list = [k for k, v in sorted(languages.items(), key=func)]\n languages_list.reverse()\n response['stars'] = stars\n response['repo_data'] = lst\n response['languages'] = languages_list\n\n return response", "def fetch_authors_other_work(self):\r\n sumGPS = gitProfileSet(\"inverse_\"+self.name)\r\n repoList = []\r\n \r\n for author in tqdm(self.authors.values()):\r\n repoList.extend([repo.clone_url for repo in author.getRepos()])\r\n\r\n return repoList", "def get_repo_list(self):\r\n return self._repo.keys()", "def get_wiki_contributors(self, subreddit, *args, **kwargs):\n url = self.config['wiki_contributors'].format(\n subreddit=six.text_type(subreddit))\n return self._get_userlist(url, user_only=True, *args, 
**kwargs)", "def get_repositories():\n\n username = userEntry.get()\n organization = orgEntry.get()\n password = passEntry.get()\n\n if username == \"\":\n messagebox.showinfo(\"Missing Username\", \"Please enter your GitHub account username in the field provided.\")\n return\n if organization == \"\":\n messagebox.showinfo(\"Missing Organization\", \"Please enter a GitHub organization in the field provided\")\n return\n if password == \"\":\n messagebox.showinfo(\"Missing Password\", \"Please enter your GitHub account password.\")\n return\n\n connection = Connection.Connection(username, password)\n repo_licenses = connection.get_repos(organization)\n\n if repo_licenses is None:\n messagebox.showerror(\"Invalid credentials.\", \"Please enter valid credentials.\")\n else:\n repo_win = gui.Tk()\n repo_win.title(\"Repositories\")\n row = 0\n for key in repo_licenses:\n Label(repo_win, text=key, justify=gui.LEFT).grid(padx=10, pady=7, row=row, column=0)\n if repo_licenses[key] == \"No License\":\n add_button = Button(repo_win, text=\"Add license\",\n command=partial(get_licenses,connection, organization, key),\n bg=\"#b3b8ba\")\n add_button.grid(padx=10, pady=7, row=row, column=1)\n else:\n Label(repo_win, text=repo_licenses[key], justify=gui.LEFT).grid(padx=10, pady=7, row=row, column=1)\n row = row + 1", "def get_github_orgs():\n gqlapi = gql.get_api()\n return gqlapi.query(GITHUB_ORGS_QUERY)[\"orgs\"]", "def get_repos():\n try:\n with open(\"repos.json\") as data_file: \n repos = json.load(data_file)\n return repos\n except:\n print \"Error loading repos.json\"\n sys.exit()", "def __format_repo(repo):\n data = repo.split(\"/\")\n organization = data[0]\n repo_name = data[1]\n return organization, repo_name", "def repositories_get(self):\n repos = list()\n response_repos = self.session.get('%s/user/repos' % (self.base_url))\n if response_repos.status_code == 200:\n for repo in response_repos.json():\n repos.append([repo['id'], repo['full_name'], repo['ssh_url']])\n return repos\n else:\n raise GogsBaseException()", "def contributing_text():\n return P.CONTRIBUTING.read_text(encoding=\"utf-8\")", "def contributor_group(self):\n items = self._head.get('source', {}).get('contributor-group', [])\n if len(items) == 0:\n return None\n if not isinstance(items, list):\n items = [items]\n out = []\n fields = 'given_name initials surname indexed_name role'\n pers = namedtuple('Contributor', fields)\n for item in items:\n entry = item.get('contributor', {})\n new = pers(indexed_name=entry.get('ce:indexed-name'),\n role=entry.get('@role'), surname=entry.get('ce:surname'),\n given_name=entry.get('ce:given-name'),\n initials=entry.get('ce:initials'))\n out.append(new)\n return out", "def test_user_list_repos(self):\n pass", "def getContributors(patches):\n\t\t\n\tdata = []\n\tcontributors = []\n\n\t# group contributions by username \n\tdata = sorted(patches, key=itemgetter('username'))\n\t\n\tfor k, g in itertools.groupby(data, key=lambda x:x['username']):\n\t\tcontributors.append(list(g)) # Store group iterator as a list\n\n\treturn contributors", "def _get_official_repos():\n seen_repos = set()\n regular_repos = []\n infra_repos = []\n deliverables = set()\n retired_repos = []\n\n # NOTE(dhellmann): We could get fancy and support loading\n # governance data from a local repo so we could support zuul's\n # Depends-On feature to link together patches, but that would\n # complicate the build environment needed for an individual\n # developer, so we just always pull from the remote repo for now.\n gov_data 
= governance.Governance.from_remote_repo()\n\n for repository in gov_data.get_repositories():\n repo = repository.name\n base = repo.rsplit('/')[-1]\n\n if repo in _RETIRED_REPOS:\n # Skip in case repo is not removed yet from governance\n # or is only deprecated.\n continue\n if repo in seen_repos:\n # Sometimes the governance data ends up with\n # duplicates, but we don't want duplicate rules to\n # be generated.\n continue\n seen_repos.add(repo)\n deliverables.add(repository.deliverable.name)\n\n if repository.deliverable.team.name == 'Infrastructure':\n add = infra_repos.append\n else:\n add = regular_repos.append\n # Overwrite infra list for a few repositories\n if repo in _INFRA_REPOS_EXCEPTION:\n regular_repos.append({'name': repo, 'base': base})\n elif repo not in _IGNORED_REPOS:\n add({'name': repo, 'base': base})\n\n for repo in _RETIRED_REPOS:\n base = repo.rsplit('/')[-1]\n retired_repos.append({'name': repo, 'base': base})\n\n return (regular_repos, infra_repos, retired_repos,\n list(sorted(deliverables)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Group the repositories by user, so each user will have a list of repositories.
def _aggregate_repositories_to_user(self, data: dict) -> dict: results = dict() for result in data: # result will be a list of users and repo object and repo's languages. for user in result["users"]: # check if we get this user on any repo before or not # if found, we will append this repo to his list if user["id"] in results: results[user["id"]]["repos"].append(result["repo"]) # if not found, will add the first repo for this user else: results[user["id"]] = { "user": user, "repos": [result["repo"]], "languages": result["languages"], } return results
[ "def get_user_repos(username, org_id=None):\n if org_id is None:\n owned_repos = seaserv.list_personal_repos_by_owner(username)\n shared_repos = seafile_api.get_share_in_repo_list(username, -1, -1)\n groups_repos = []\n for group in seaserv.get_personal_groups_by_user(username):\n # TODO: use seafile_api.get_group_repos\n groups_repos += seaserv.get_group_repos(group.id, username)\n if CLOUD_MODE:\n public_repos = []\n else:\n public_repos = seaserv.list_inner_pub_repos(username)\n\n for r in shared_repos + public_repos:\n # collumn names in shared_repo struct are not same as owned or group\n # repos.\n r.id = r.repo_id\n r.name = r.repo_name\n r.desc = r.repo_desc\n r.last_modify = r.last_modified\n else:\n owned_repos = seafile_api.get_org_owned_repo_list(org_id, username)\n shared_repos = seafile_api.get_org_share_in_repo_list(org_id, username,\n -1, -1)\n groups_repos = []\n for group in seaserv.get_org_groups_by_user(org_id, username):\n groups_repos += seafile_api.get_org_group_repos(org_id, group.id)\n public_repos = seaserv.seafserv_threaded_rpc.list_org_inner_pub_repos(org_id)\n\n for r in shared_repos + groups_repos + public_repos:\n # collumn names in shared_repo struct are not same as owned\n # repos.\n r.id = r.repo_id\n r.name = r.repo_name\n r.desc = r.repo_desc\n r.last_modify = r.last_modified\n\n return (owned_repos, shared_repos, groups_repos, public_repos)", "def get_repositories(user):\n\tprint(\"Mining {}...\".format(user), end='', flush=True) \n\t\t\n\treq_string = \"https://api.github.com/users/\" + user + \"/repos\"\n\tr = requests.get(req_string, headers = {'Authorization': 'token 6ca2047ccbab4ad1a2f472e35e2e659c8861bfb7'}).json()\n\n\tprint(\"finished!!!\")\n\n\treturn r", "def use_repos_for_user(self, owner):\n try:\n self._add_to_repositories_to_use(self.github_api.iter_user_repos(owner))\n except GitHubError as ex:\n raise UserError(f\"Failed to get user repositories. 
{str(ex)}\")", "def user_repositories_view(username):\n\n api = GithubApiService(\n github_api_token=current_app.config['GITHUB_API_TOKEN']\n )\n\n try:\n page = int(request.args.get('page') or 0)\n except ValueError:\n # Page is not an instance of an integer.\n page = 0\n\n try:\n repositories = api.get_user_repositories(username, page)\n except UserNotFoundServiceException:\n abort(404)\n except BadCredentialsServiceException:\n # Problem with token.\n abort(500)\n except ApiRateLimitExceededServiceException:\n # Exceeded api calls limit.\n abort(500)\n except UnprocessableEntityServiceException as e:\n # Api could not process the request correctly.\n abort(422, str(e))\n\n return {\n 'repositories': repositories,\n 'repositories_count': len(repositories),\n 'username': username\n }", "def test_user_list_repos(self):\n pass", "def fetch_all_repositories(user):\n resp_repos = requests.get(\n 'https://api.github.com/users/' + user + '/repos',\n auth=('Holberton_School', 'fffa38b10948aa7eff293682308672bc95672ae3')\n )\n repos_json = resp_repos.json()\n repos_dict = {}\n for i in range(len(repos_json)):\n name = repos_json[i][\"name\"]\n date = datetime.datetime.strptime(\n repos_json[i][\"created_at\"], '%Y-%m-%dT%H:%M:%SZ'\n )\n try:\n sha = requests.get('https://api.github.com/repos/' + user + '/' + name + '/commits', auth=('Holberton_School', 'fffa38b10948aa7eff293682308672bc95672ae3')).json()[0][\"sha\"]\n except:\n print \"error getting sha for %s\" % (name)\n if name not in repos_dict:\n repos_dict[name] = [date, sha]\n\n sorted_list = sort_dict_by_date(repos_dict)\n\n for repo in sorted_list:\n print repo\n print \"\\t%s\" % (str(repos_dict[repo][0]))\n print \"\\t%s\\n\" % (repos_dict[repo][1])", "def get_repositories(self) -> List[\"Repository\"]:\n url = f\"/users/{self.username}/repos\"\n results = self.gitea.requests_get_paginated(url)\n return [Repository.parse_response(self.gitea, result) for result in results]", "def get_repositories():\n\n username = userEntry.get()\n organization = orgEntry.get()\n password = passEntry.get()\n\n if username == \"\":\n messagebox.showinfo(\"Missing Username\", \"Please enter your GitHub account username in the field provided.\")\n return\n if organization == \"\":\n messagebox.showinfo(\"Missing Organization\", \"Please enter a GitHub organization in the field provided\")\n return\n if password == \"\":\n messagebox.showinfo(\"Missing Password\", \"Please enter your GitHub account password.\")\n return\n\n connection = Connection.Connection(username, password)\n repo_licenses = connection.get_repos(organization)\n\n if repo_licenses is None:\n messagebox.showerror(\"Invalid credentials.\", \"Please enter valid credentials.\")\n else:\n repo_win = gui.Tk()\n repo_win.title(\"Repositories\")\n row = 0\n for key in repo_licenses:\n Label(repo_win, text=key, justify=gui.LEFT).grid(padx=10, pady=7, row=row, column=0)\n if repo_licenses[key] == \"No License\":\n add_button = Button(repo_win, text=\"Add license\",\n command=partial(get_licenses,connection, organization, key),\n bg=\"#b3b8ba\")\n add_button.grid(padx=10, pady=7, row=row, column=1)\n else:\n Label(repo_win, text=repo_licenses[key], justify=gui.LEFT).grid(padx=10, pady=7, row=row, column=1)\n row = row + 1", "def getRepos(self):\n self._validateValues(accountName=self.accountName)\n return self._callBitbucketRestAPI(BitbucketRESTCall.USER, uriParts=[str(BitbucketRESTCall.REPOSITORIES)])", "def repos_from_k8s_group(k8s_group):\n repos = {}\n subprojects = k8s_group.get('subprojects', 
[])\n if subprojects is None:\n subprojects = []\n for sp in subprojects:\n for uri in sp['owners']:\n owners_path = re.sub(r\"https://raw.githubusercontent.com/(.*)/master/(.*)\",r\"\\1/\\2\",uri)\n path_parts = owners_path.split('/')\n # org/repo is owned by k8s_group if org/repo/OWNERS os in one of their subprojects\n if path_parts[2] == 'OWNERS':\n repo = '/'.join(path_parts[0:2])\n repos[repo] = True\n return sorted(repos.keys())", "def getGitData(username):\n\n # fetch access token for given username\n conn = create_connection('test.db')\n query = f\"SELECT token from Token WHERE g_username='{username}';\"\n result = execute_read_query(conn, query)\n token = (result[0])[0]\n \n # appropriate header for GitHub API '/usr' endpoint\n headers = {'Authorization': f\"token {token}\"}\n usrUrl = \"https://api.github.com/user\"\n res = requests.get(url=usrUrl, headers=headers)\n res = res.json()\n\n # fetch required details from response\n response = {}\n response['id'] = res['login']\n response['followers'] = res['followers']\n response['public_repos'] = res['public_repos']\n\n # request for fetching repository details\n repoUrl = f\"https://api.github.com/users/{username}/repos\"\n res = requests.get(url=repoUrl, headers=headers)\n repo_data = res.json()\n\n # store all repository details in lst\n lst = []\n stars = 0\n languages = {}\n for repo in repo_data:\n obj = {}\n obj['name'] = repo['name']\n obj['stars'] = repo['stargazers_count']\n obj['language'] = repo['language']\n obj['description'] = repo['description']\n obj['forks_count'] = repo['forks_count']\n\n key = repo['language']\n if key is not None:\n key = str(repo['language'])\n if key in languages:\n languages[key] += 1\n else:\n languages[key] = 0\n stars += obj['stars']\n lst.append(obj)\n\n # sort all repos on number of stars\n def func(item): return item[1]\n languages_list = [k for k, v in sorted(languages.items(), key=func)]\n languages_list.reverse()\n response['stars'] = stars\n response['repo_data'] = lst\n response['languages'] = languages_list\n\n return response", "def get_shared_groups_by_repo_and_user(repo_id, username):\n repo_shared_groups = seaserv.get_shared_groups_by_repo(repo_id)\n\n # Filter out groups that user is joined.\n groups = [x for x in repo_shared_groups if seaserv.is_group_user(x.id, username)]\n return groups", "def repositories_get(self):\n repos = list()\n response_repos = self.session.get('%s/user/repos' % (self.base_url))\n if response_repos.status_code == 200:\n for repo in response_repos.json():\n repos.append([repo['id'], repo['full_name'], repo['ssh_url']])\n return repos\n else:\n raise GogsBaseException()", "def get_repositories():\n github = GitHubAPI(user_id=current_user.id)\n ctx = dict(connected=False)\n if github.session_token:\n # Generate the repositories view object\n repos = github.get_user_repositories()\n last_sync = github.get_last_sync_time()\n\n ctx.update(\n {\n \"connected\": True,\n \"repos\": sorted(repos.items(), key=lambda x: x[1][\"full_name\"]),\n \"last_sync\": last_sync,\n }\n )\n\n return render_template(current_app.config[\"GITHUB_TEMPLATE_INDEX\"], **ctx)", "def get_accessible_repos(self) -> List[\"Repository\"]:\n results = self.gitea.requests_get(\"/user/repos\", sudo=self)\n return [Repository.parse_response(self, result) for result in results]", "def add_all_repos_for_owner(self, owner_name=\"ISISComputingGroup\"):\n try:\n organisation = self.github_api.organization(owner_name)\n except GitHubError as ex:\n raise UserError(f\"Failed to get owner as 
organisation. {str(ex)}\")\n\n try:\n if organisation is None:\n print(f\"Owner {owner_name} not found as organisation, defaulting to user\")\n iter_repos = self.github_api.iter_user_repos(owner_name)\n else:\n iter_repos = organisation.iter_repos()\n self._add_to_repositories_to_use(iter_repos)\n except GitHubError as ex:\n raise UserError(f\"Failed to get owner's repositories. {str(ex)}\")", "def repos(self, api, master_repo_names):\n target_group_id = api._gitlab.tests_only_target_group_id\n groups = [\n api._gitlab.groups.create(\n dict(\n name=str(group), path=str(group), parent_id=target_group_id\n )\n )\n for group in constants.STUDENTS\n ]\n yield [\n plug.Repo(\n name=plug.generate_repo_name(group.name, master_name),\n description=\"Student repo\",\n private=True,\n team_id=group.id,\n )\n for group in groups\n for master_name in master_repo_names\n ]", "def get_user_starred_repositories(user, kind_filter=\"image\"):\n try:\n repo_kind = Repository.kind.get_id(kind_filter)\n except RepositoryKind.DoesNotExist:\n raise DataModelException(\"Unknown kind of repository\")\n\n query = (\n Repository.select(Repository, User, Visibility, Repository.id.alias(\"rid\"))\n .join(Star)\n .switch(Repository)\n .join(User)\n .switch(Repository)\n .join(Visibility)\n .where(Star.user == user, Repository.kind == repo_kind)\n .where(Repository.state != RepositoryState.MARKED_FOR_DELETION)\n )\n\n return query", "def get_visible_repositories(self):\n url = current_user_repos_url()\n return self._dispatcher.dispatch(url, access_token=self._access_token,\n access_token_secret=self._access_token_secret)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the report filename in the directory. The directory will be created if it's not exists.
def filename(self): # create the folder if it doesn't exist' if not os.path.exists(self.report_path): os.makedirs(self.report_path) time_now = datetime.now().strftime("%m_%d_%Y_%H_%M") filename = f"{self.report_path}/report_{time_now}.csv" return os.path.join(self.report_path, filename)
[ "def create_report_dir(self) -> str:\n return create_report_dir_with_rotation(self.dir)", "def generate_file_name():\n import datetime\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n filename = \"game saved at {}\".format(now)\n return filename", "def build_answers_distribution_report_name(problem):\n running_report_name = course_and_time_based_filename_generator(problem.location,\n problem.display_name)\n running_report_name += u\".csv\"\n return running_report_name[:255]", "def gen_filename() -> str:\n return str(datetime.timestamp(datetime.now())).replace(\".\", \"\")", "def generate_report():", "def create_report_dir(self, top_dir):\n raise NotImplementedError()", "def generate_missing_file_report(directory):\n report_filename = os.path.join(directory, 'REAPER Clip Splicer report ' +\n str(datetime.datetime.now()) + \".txt\")\n report_file = open(report_filename, \"w\")\n report_string = \"\"\n report_string += \"REAPER Clip Splicer report\\n\"\n report_string += str(datetime.datetime.now()) + \"\\n\\n\"\n report_string += \"MISSING Files\\n\\n\"\n for track in missing_files:\n report_string += track + \"\\n\\n\"\n for missing_filename in missing_files[track]:\n report_string += missing_filename + \"\\n\"\n report_string += \"\\n\"\n report_file.write(report_string)", "def create_pdf_filename(self):\n return 'EA_for_{p}{y}_{v}_pdf.csv'.format(p=self.period_name,\n y=self.iss_year,\n v=self.variable)", "def _make_filename(self, step_num, uncertain=False):\n if uncertain:\n return os.path.join(self._cache_dir,\n 'step_%06i_uncert.npz' % step_num)\n else:\n return os.path.join(self._cache_dir,\n 'step_%06i.npz' % step_num)", "def get_pdffilename(self):\n project_dir = os.path.dirname(self.template_file)\n #print yaml.load(open(os.path.join(project_dir, 'index.yaml')))\n\n pdfkeys = yaml.load(open(os.path.join(project_dir, 'index.yaml')))['pdffilename']\n filename = os.path.join(project_dir, 'reports',\n ''.join(['_'.join([self.vals[key] for key in pdfkeys]), '_', self.uniq_id, '.pdf']))\n\n #TODO: uniq_id is still not really unique and there is small theoretical possiblity\n # that filename may reflect older patient. However this will happen only if the\n # older record is deleted, so should not matter much.\n return filename", "def reports_dir():\n return _mkifnotexists(\"web/reports\")", "def _generate_log_path(self):\n file_name = self.if_name + \"_\" + \\\n datetime.today().strftime(\"%Y%m%d_%H%M%S\")\n return os.path.join(self.log_dir, file_name)", "def generate_report(self):\n if self.report_format == \"csv\":\n print(\"[+] Building the report -- you selected a csv report.\")\n self.output_csv_report = self._build_output_csv_file_name()\n self.write_csv_report()\n elif self.report_format == \"word\":\n print(\"[+] Building the report -- you selected a Word/docx report.\")\n print(\"[+] Looking for the template.docx to be used for the Word report.\")\n if os.path.isfile(\"template.docx\"):\n print(\"[+] Template was found -- proceeding with report generation...\")\n print(\"L.. This may take a while if you provided a lot of \\\nIDs for a combined report or have a lot of targets.\")\n self.output_word_report = self._build_output_word_file_name()\n self.write_word_report()\n else:\n print(\"[!] Could not find the template document! 
Make sure \\\n'template.docx' is in the GoReport directory.\")\n sys.exit()\n elif self.report_format == \"quick\":\n print(\"[+] Quick report stats:\")\n self.get_quick_stats()", "def generate_filename(self, item):\n descr = item.generate_filename_descr()\n return helpers.format_filename(descr, item.samling, item.idno)", "def create_pathname(self, output_path):\n self.generate_name()\n\n return os.path.join(output_path, self.name)", "def get_default_result_file_name(self):\n backtestResultsFolder = 'Backtest Results'\n symbol = 'Imported' if not self.symbol else self.symbol\n dateString = datetime.now().strftime(\"%Y-%m-%d_%H-%M\")\n resultFile = f'{symbol}_backtest_results_{\"_\".join(self.interval.lower().split())}-{dateString}.txt'\n os.chdir('../')\n\n if not os.path.exists(backtestResultsFolder):\n os.mkdir(backtestResultsFolder)\n os.chdir(backtestResultsFolder)\n\n counter = 0\n previousFile = resultFile\n\n while os.path.exists(resultFile):\n resultFile = f'({counter}){previousFile}'\n counter += 1\n\n return resultFile", "def __set_report_path__(self):\n self.report_path = os.path.join(self.get_report_path(), \"pattern_and_similarity_report\")\n Path(self.report_path).mkdir(parents=True, exist_ok=True)", "def _build_output_csv_file_name(self):\n csv_report = \"GoPhish Results for Campaign - {}.csv\".format(self.cam_name)\n return csv_report", "def make_filename(base_dir, make_subdirs=True):\n start_time = time.time()\n\n subdir = os.path.join(base_dir, \"{:.5}\".format(str(start_time)))\n\n if not os.path.exists(subdir):\n if make_subdirs:\n try:\n os.makedirs(subdir)\n except PermissionError as e:\n LOG.error(\"Permission error encountered while trying to create \"\n f\"data sub-directory: {e}\")\n raise e\n else:\n raise FileNotFoundError(\"Subdir {} does not exist\"\n .format(subdir))\n\n time_string = int(start_time)\n filename = os.path.join(subdir, \"{}.g3\".format(time_string))\n return filename" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start point for this class, will call all services and write the results into a CSV.
def generate_report(self) -> None: csv_data = self._run() self._write_csv(csv_data)
[ "def main():\n\n ticket = get_service_ticket() # create an APIC-EM Auth ticket\n device_id_list = get_device_ids(ticket) # build a list with all device id's\n devices_info = collect_device_info(device_id_list, ticket)\n filename = get_input_file() # ask user for filename input\n output_file = open(filename, 'w', newline='')\n outputWriter = csv.writer(output_file)\n for lists in devices_info:\n outputWriter.writerow(lists)\n output_file.close()\n # pprint(devices_info) # print for data validation", "def run_pipeline():\n\n orca_df = load_input()\n orca_df = aggregate_stops(orca_df)\n routes_df = aggregate_routes(orca_df)\n\n # Write to CSV\n if not os.path.exists(WRITE_DIR):\n os.mkdir(WRITE_DIR)\n files = {'stops_aggregate.csv': orca_df, 'routes_aggregate.csv': routes_df}\n for fname in files:\n files[fname].to_csv(os.path.join(WRITE_DIR, fname), index=False)\n tqdm.write(f'Wrote {fname} to {WRITE_DIR}')", "def run(self):\n # Start by finding or creating an instance to run on. We have\n # the job_id, so get all of the job information from the database\n job_info = self.get_job_info()\n\n # Now get any existing aws instances\n instances = self.get_existing_instances()\n\n # Check if one of them will fulfil the job\n worker = self.get_worker_instance(instances, self.inst_type)\n\n # Now we have the worker instance we are going to use, so firstly\n # update the database to reflect the work_instance\n self.db_manager.update_job(self.job_id, worker.instance_id)\n self.db_manager.update_job_status(\"Deploying\", self.job_id)\n\n # Now we need to deploy the job\n worker.deploy_job(job_info)\n self.db_manager.update_job_status(\"Processing Logs\", self.job_id)\n\n\n # Retrieve the logs and put them in the shared FS\n csv_log = worker.handle_logs()\n self.db_manager.update_job_status(\"Complete\", self.job_id)\n\n return csv_log", "def export_data(self):\n # export crawled urls\n # export contacts\n # export listings", "def main():\n read_csv_dict('../data/processed/avengers_processed.csv')\n create_report('../data/processed/avengers_sorted.csv')", "def scrape(self):\n lead_df, speed_df, boulder_df, combined_df = self.make_df_from_data(self.get_sub_comp_info(self.get_complete_result_links(self.check_for_new(self.get_comp_links()))))\n\n # Merge new data with old data\n lead_df, speed_df, boulder_df, combined_df = self.merge_dfs([lead_df, speed_df, boulder_df, combined_df])\n\n # Clean data before saving\n lead_df = self.clean_lead(lead_df)\n speed_df = self.clean_speed(speed_df)\n boulder_df = self.clean_boulder(boulder_df)\n combined_df = self.clean_combined(combined_df)\n\n lead_df.to_csv('lead_results.csv', index=False)\n speed_df.to_csv('speed_results.csv', index=False)\n boulder_df.to_csv('boulder_results.csv', index=False)\n combined_df.to_csv('combined_results.csv', index=False)", "def run(self):\n logging.info(\"Running benchmark suite...\")\n for benchmark in self._benchmarks:\n result = self.run_method(benchmark)\n print(result)\n if self._table is None:\n self._table = Table([result])\n else:\n self._table.update([result])\n self.write_results()\n self.host_results()", "def run(self):\n print(\"************* Start! *****************\")\n print(\"************* Extracting data... *****************\")\n data = self.__extract_data()\n print(\"************* Data extracted *****************\")\n print(\"************* Transforming data... 
*****************\")\n clusters = self.__transform_data(data)\n print(\"************* Transformation is done *****************\")\n print(\"************* Saving data *****************\")\n self.__load(clusters)\n print(\"************* End! *****************\")", "def save_all_logs(self):\n\n # First get all unique properties\n # Obtain information on simulations\n simulation_dict = {\"SimulationID\": [], \"SimulationName\": []}\n self.get_unique_properties(\"simulations\", simulation_dict)\n\n # Obtain information on activities\n activity_dict = {\n \"ActivityID\": [],\n \"ActivityName\": [],\n \"EquipmentID\": [],\n \"ActivityFunction\": [],\n }\n self.get_unique_properties(\"activities\", activity_dict)\n\n # Obtain information on equipment\n equipment_dict = {\"EquipmentID\": [], \"EquipmentName\": []}\n self.get_unique_properties(\"equipment\", equipment_dict)\n\n # Obtain information on locations\n location_dict = {\n \"LocationID\": [],\n \"LocationName\": [],\n \"Longitude\": [],\n \"Latitude\": [],\n }\n self.get_unique_properties(\"location\", location_dict)\n\n # Obtain information on events\n event_dict = {\"EventID\": [], \"EventName\": []}\n self.get_unique_properties(\"events\", event_dict)\n\n # Continue with obtaining the logs, energy use and dredging spill\n self.get_equipment_log()\n self.get_energy()\n self.get_spill()\n self.get_results()\n\n # Save all as csv files\n self.generic_results.to_csv(self.location + \"generic_results.csv\", index=False)\n self.dredging_spill.to_csv(self.location + \"dredging_spill.csv\", index=False)\n self.energy_use.to_csv(self.location + \"energy_use.csv\", index=False)\n self.equipment_log.to_csv(self.location + \"equipment_log.csv\", index=False)\n self.unique_events.to_csv(self.location + \"events.csv\", index=False)\n self.unique_activities.to_csv(self.location + \"activities.csv\", index=False)\n self.unique_equipment.to_csv(self.location + \"equipment.csv\", index=False)\n self.unique_locations.to_csv(self.location + \"locations.csv\", index=False)\n self.unique_simulations.to_csv(self.location + \"simulations.csv\", index=False)", "def run(self):\n self.data_reader()\n self.data_transformer()\n self.data_writer()", "def get_api_data():\n\n # we will be returning the total number of records.\n total_records = 0\n\n # set how many records we want per page. 
100 is the max.\n per_page = 100\n\n # set the location and name of the csv file.\n output_file = 'output/doe.csv'\n\n # get a dictionary with the counts of records per region.\n meta_dict = __gen_meta_dict(per_page)\n\n # set the total records as we will be returning this value.\n total_records = sum(meta_dict.values())\n print(f'Total records:{total_records}')\n\n # send dictionary to function to build csv with multiple\n # requests to api.\n if __handle_requests(per_page, meta_dict, output_file):\n print('CSV file created.')\n\n # send back just the total count.\n return total_records", "def main():\n data=loadData('FileName.csv')\n APIKey = \"put here your key\"\n\n file = open('out.txt', 'w')\n\n for i in range(0,len(data)):\n line = list(data[i])\n #The index 13 corresponds to \"From Provider\"\n #The index 19 corresponds to \"To Client\"\n fromP = line[13].replace(\" \",\",\")\n toCl = line[19].replace(\" \",\",\")\n\n #Put the distance at the end of each line\n line.append(getDistance(fromP,toCl,APIKey))\n\n #Write the information in the file\n file.writelines([\"%s;\" % item for item in line])\n file.write(\"\\n\")\n\n file.close()\n print(\"Completed task\")", "def run():\n # create a Parameters object containing current-law policy (clp) parameters\n clp = Parameters()\n\n # create a Records object (puf) containing puf.csv input records\n tax_dta = pd.read_csv('puf.csv')\n blowup_factors = './taxcalc/StageIFactors.csv'\n weights = './taxcalc/WEIGHTS.csv'\n puf = Records(tax_dta, blowup_factors, weights)\n\n # create a Calculator object using clp params and puf records\n calc = Calculator(params=clp, records=puf)\n\n # save calculated test results in output dataframe (odf)\n odf = calc.calc_all_test()\n odf = odf.T.groupby(level=0).first().T\n\n # write test output to csv file named 'results_puf.csv'\n odf.to_csv('results_puf.csv', float_format='%1.3f',\n sep=',', header=True, index=False)", "def csv():\n\n print \"HOST,GUEST,PERSISTENT,ACTIVE,LUN,LV,MASK,SNAP,POOL,SIZE\"\n\n for host in config.HOSTS:\n doms = guests(host, alldoms=True)\n\n for dom in doms:\n printcsv(host, dom)", "def main():\n Console.print_header(welcome_msg)\n parser = utils.get_input_arg_parser(description=\"Add sites in google search console base on a \"\n \"list of google analytics properties from a CSV file.\",\n parents=(tools.argparser,))\n args = parser.parse_args()\n\n search_console_settings = settings.googleapi[\"search_console\"]\n api_search_console = get_service(api_name=search_console_settings[\"api_name\"],\n api_version=search_console_settings['api_version'],\n client_secrets_path=args.credentials,\n scope=search_console_settings['scopes'],\n flags=args)\n\n batch = BatchHttpRequest(callback=batch_http_request_default_callback)\n with open(args.input_file, 'r') as csv_file:\n reader = csv.DictReader(csv_file)\n print(\"Preparing batch request:\")\n sites_count = 0\n for row in reader:\n website_url = row[\"Properties\"]\n batch.add(api_search_console.sites().add(siteUrl=website_url),\n callback=(lambda *x: print(website_url)))\n sites_count += 1\n print(\"\\t** Analytics account: %s, Site URL: %s\" % (row[\"Account\"], website_url))\n Console.print_green(\"\\n\", sites_count, \" sites added to batch request\")\n batch.execute()\n Console.print_good_bye_message()", "def csv_output(self):\n\n # determine the file name\n csv_filename = \"subscription-%s-%siter-%s-%s.csv\" % (self.subscriptiontype,\n self.iterations,\n self.chart_type.lower(),\n self.testdatetime)\n\n # initialize the csv file\n 
csvfile_stream = open(csv_filename, \"w\")\n csvfile_writer = csv.writer(csvfile_stream, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n\n # iterate over the SIBs\n for sib in self.results.keys(): \n \n row = [sib]\n \n # add all the times\n for value in self.results[sib]:\n row.append(value)\n\n # add the mean, min, max and variance value of the times to the row\n row.append(round(nmean(self.results[sib]),3)) \n row.append(round(nmin(self.results[sib]),3)) \n row.append(round(nmax(self.results[sib]),3)) \n row.append(round(nvar(self.results[sib]),3)) \n\n # write the row\n csvfile_writer.writerow(row)\n \n # close the csv file\n csvfile_stream.close()", "def execute(self):\r\n # (1) Analyze the arguments.\r\n lat, lon, date, interval, repeat, csv, altitude = self._argument_parse()\r\n\r\n header = [[\"latitude\", lat], [\"longitude\", lon], [\"altitude(m)\", altitude]]\r\n\r\n # (2) Calculate the azimuth and elevation of the sun.\r\n loc = tzl.get_localzone()\r\n result = [[\"Date\", \"AZ(deg)\", \"EL(deg)\"]]\r\n for _ in range(repeat):\r\n az, el = self._calc(date, lat, lon, altitude)\r\n date_str = date.astimezone(loc).strftime(\"%Y/%m/%d %H:%M:%S\")\r\n result.append([date_str, az, el])\r\n date = date + dt.timedelta(seconds=interval)\r\n\r\n # (3) View the results or save to a csv file.\r\n if csv == \"\":\r\n self._print_result(header, result)\r\n else:\r\n self._save_to_csv(csv, header, result)", "def generate_csv():\n\tdata_frame = get_all_occupancy_data(False)\n\tdata_frame = resample_timestamp(data_frame)\n\tprint('Resample time stamp DONE')\n\tdata_frame = clean_data(data_frame)\n\tprint('Clean data DONE')\n\tdata_frame = add_public_holidays(data_frame)\n\tprint('Add holidays DONE')\n\tdata_frame = add_weather_info_to_data(data_frame)\n\tprint('Add weather DONE')\n\tdata_frame = add_lines_info_to_data(data_frame)\n\tprint('Add lines DONE')\n\tdata_frame = cut_weather(data_frame, True)\n\tprint('Cut weather DONE')\n\tdata_frame = cut_lines_reservation(data_frame)\n\tprint('Cut lines DONE')\n\tsave_data_to_csv(data_frame, DATASET_CSV_PATH)\n\t#split_csv(data_frame)", "def run(self) -> list:\n self.execute_searches()\n return self.get_results_data()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets image IDs from the list of images or dataset
def get_image_list(conn,parameter_map): # Get images or datasets message = "" objects, log_message = script_utils.get_objects(conn, parameter_map) message += log_message if not objects: return None, message data_type = parameter_map["Data_Type"] if data_type == "Image": objects.sort(key=lambda x: (x.getName())) # Sort images by name image_ids = [image.id for image in objects] #[image.id for image in objects] else: for dataset in objects: images = list(dataset.listChildren()) if not images: continue images.sort(key=lambda x: (x.getName())) image_ids = [i.getId() for i in images] return image_ids, message
[ "def get_image_aids(ibs, gid_list):\n # print('gid_list = %r' % (gid_list,))\n # FIXME: MAKE SQL-METHOD FOR NON-ROWID GETTERS\n colnames = ('annot_rowid',)\n aids_list = ibs.db.get(ANNOTATION_TABLE, colnames, gid_list,\n id_colname='image_rowid', unpack_scalars=False)\n #print('aids_list = %r' % (aids_list,))\n return aids_list", "def get_image_uuids(ibs, gid_list):\n image_uuid_list = ibs.db.get(IMAGE_TABLE, ('image_uuid',), gid_list)\n return image_uuid_list", "def get_image_eids(ibs, gid_list):\n # FIXME: MAKE SQL-METHOD FOR NON-ROWID GETTERS\n colnames = ('encounter_rowid',)\n eids_list = ibs.db.get(EG_RELATION_TABLE, colnames, gid_list,\n id_colname='image_rowid', unpack_scalars=False)\n return eids_list", "def loadImgs(self, ids=[]):\n ids = ids if isinstance(ids, list) else [ids]\n ids = self._filterImgIds(ids)\n if len(ids) == 0:\n return []\n images = self.dataset['images']\n return [images[id] for id in ids]", "def get_images(ibs, gid_list):\n gpath_list = ibs.get_image_paths(gid_list)\n image_list = [gtool.imread(gpath) for gpath in gpath_list]\n return image_list", "def loadImgs(self, ids=[]):\n # log.info(\"-------------------------------->\")\n if type(ids) == list:\n return [self.imgs[x] for x in ids]\n else:\n return [self.imgs[ids]]", "def getImgIds(self, imgIds=[], catIds=[], refexpIds=[]):\n refexpIds = refexpIds if isinstance(refexpIds, list) else [refexpIds]\n imgIds = self._filterImgIds(imgIds)\n if len(refexpIds) == 0:\n return self.coco.getImgIds(imgIds, catIds)\n imgsForRefexps = set([])\n for refexp_id in refexpIds:\n annForRefexp = self.refexpToAnnId[refexp_id]\n imgsForRefexps.add(self.annToImgId[annForRefexp])\n cocoImgs = set(self.coco.getImgIds(imgIds, catIds))\n return list(cocoImgs.intersection(imgsForRefexps))", "def get_annot_images(ibs, aid_list):\n gid_list = ibs.get_annot_gids(aid_list)\n image_list = ibs.get_images(gid_list)\n return image_list", "def GetOriginalImages(self, IDlist):\n\n\t\t# * * * OLD FIXED-DIM VERSION * * *\n\n\t\tif self.data_loaded:\n\n\t\t\tfor ii in IDlist:\n\t\t\t\tpass\n\t\t\t\t# i = sample; % 39 when digit =1\n\t\t\t\t#\n\t\t\t\t# %% Add original image\n\t\t\t\t#\n\t\t\t\t# imagesc(reshape(X0(i,:), self.imR,[]));\n\n\t\t\treturn\n\n\t\telse:\n\t\t\traise IOError, \"Can't get image until data is loaded successfully\"", "def get_image_ids_one_object(image_ids, meta_data, objects):\n ids = []\n for img_id in image_ids:\n instance_counter = 0\n for object in objects:\n if contains_instance(meta_data, img_id, object):\n instance_counter += 1\n if instance_counter > 1:\n break\n if instance_counter == 1:\n ids.append(img_id)\n\n return ids", "def get_image_uris(ibs, gid_list):\n uri_list = ibs.db.get(IMAGE_TABLE, ('image_uri',), gid_list)\n return uri_list", "def _get_person_img_ids(cls, coco: COCO):\n cat_ids: List[int] = coco.getCatIds(catNms=['person'])\n img_ids: List[int] = coco.getImgIds(catIds=cat_ids)\n return img_ids", "def gather_images(datasets, batch_img_paths):\r\n n_batch = len(batch_img_paths)\r\n\r\n images = [[] for d in datasets]\r\n image_idx = [[] for d in datasets]\r\n\r\n for img_path in batch_img_paths:\r\n\r\n img_path_idx = index_by_path(datasets, img_path) \r\n\r\n for j, path_idx in enumerate(img_path_idx):\r\n\r\n images[j].extend(load_dataset_images(datasets[j][path_idx[0]], path_idx[1], 1))\r\n image_idx[j].append(path_idx[0]) # the model/dataset that the image is mapped to\r\n\r\n return images, image_idx", "def get_ids():", "def getSegmentedImageIds(self) -> List[str]:\n idsOfSegmented = []\n for 
imageId, imageData in self.nameToImageData.items():\n if imageData.isSegemented():\n idsOfSegmented.append(imageId)\n return idsOfSegmented", "def _filterImgIds(self, imgIds):\n imgIds = imgIds if isinstance(imgIds, list) else [imgIds]\n if len(imgIds) == 0:\n return self.imgIds\n imgIdsSet = set(imgIds)\n refexpImgsSet = set(self.imgIds)\n notRefexpImages = imgIdsSet - refexpImgsSet\n if len(notRefexpImages) > 0:\n warnings.warn('Images ' + str(notRefexpImages) + ' are not part of the GoogleRefexp dataset and will be ignored from the answer.', RuntimeWarning)\n imgIds = imgIdsSet.intersection(refexpImgsSet)\n return list(imgIds)", "def get_imageset_smart_waypoint_ids(ibs, imageset_rowid_list):\n id_iter = imageset_rowid_list\n colnames = (IMAGESET_SMART_WAYPOINT_ID,)\n imageset_smart_waypoint_id_list = ibs.db.get(\n const.IMAGESET_TABLE, colnames, id_iter, id_colname='rowid'\n )\n return imageset_smart_waypoint_id_list", "def getImages():\n imagesMap = {}\n images = getData()['images']\n for image in images:\n imagesMap[image['id']] = image['file_name']\n return imagesMap", "def streaming_image_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"streaming_image_ids\")", "def get_cosmos_tileids():\n fl = get_cosmos_flists()\n ids = []\n\n for f in fl:\n bname = os.path.basename(f)\n tileid = int( bname[-7:].replace('.dat','') ) \n ids.append(tileid)\n\n return ids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that all necessary dependencies for running the testsuite are met. This includes dependencies coming from the style_checker itself, as well as dependencies coming from the testsuite framework.
def check_dependencies(args): missing_deps = [] # The list of modules we need to be available in the Python # distribution. required_modules = ["pytest", "e3"] if args.verify_style_conformance: required_modules.append("flake8") # The list of programs we need to be installed and accessible # through the PATH. required_programs = [ ("/bin/csh", "/bin/csh"), ("checkstyle", "Java style checker (checkstyle)"), ("coverage", "pytest-cov plugin for pytest"), ("gnatls", "GNAT Pro in your PATH"), ] # First, check that the Python being used is recent enough. python_version = StrictVersion( "{v.major}.{v.minor}".format(v=sys.version_info)) if python_version < MINIMUM_PYTHON_VERSION: print("ERROR: Your version of Python is too old: " "({v.major}.{v.minor}.{v.micro}-{v.releaselevel})" .format(v=sys.version_info)) print(" Minimum version required: {}" .format(MINIMUM_PYTHON_VERSION)) print("Aborting.") sys.exit(1) # Next, check that all required dependencies are there. for module_name in required_modules: if importlib.util.find_spec(module_name) is None: missing_deps.append(f"Python module: {module_name}") for exe, description in required_programs: if shutil.which(exe) is None: missing_deps.append(description) # If anything was missing, report it and abort. if missing_deps: print("ERROR: The testing environment is missing the following:") for dep in missing_deps: print(f" - {dep}") sys.exit(1)
[ "def check_main_depencies():\n print(\"# Checking dependencies\")\n for tool in TOOLS_NEEDED:\n print(\"[+] Checking %s... \" % tool, end='')\n if which(tool) is not None:\n print(\"ok!\")\n else:\n print(\"missing!\")\n sys.exit()\n\n print()\n print(\"[+] Dependencies ok !\")\n print()", "def dependencies_check():\n # enforce Python minimum version\n vsys_py = sys.version_info[:3] # 4th element is a string\n if (vsys_py < PYTHON_MIN):\n vmin_py_str = \".\".join((\"%d\" % i) for i in PYTHON_MIN)\n vsys_py_str = \".\".join((\"%d\" % i) for i in vsys_py)\n depfails.append((\"bad\", (\"need Python %s but running under %s: %s\"\n % (vmin_py_str, vsys_py_str, sys.executable))))\n # report problems & exit\n for (p, v) in depfails:\n ERROR(\"%s dependency: %s\" % (p, v))\n if (len(depfails) > 0):\n sys.exit(1)", "def check_testing_requirements() -> None:\n\n testing_requirements_path = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n \"..\",\n \"..\",\n \"requirements-dev\",\n \"requirements-testing.txt\",\n )\n )\n with open(testing_requirements_path, \"r\") as testing_requirements:\n # adapted from https://stackoverflow.com/a/16298328\n pkg_resources.require(testing_requirements)", "def _check_dependencies():\n logger.info('Checking program dependencies ...')\n\n if not which('ruby'):\n logger.warn('Ruby not found')\n logger.info('Running apt-get update ...')\n run('apt-get update')\n logger.info('Installing ruby ...')\n run('apt-get install git-core ruby ruby-dev libopenssl-ruby build-essential wget ssl-cert curl rubygems -y')\n\n # Check if `gem` is available\n if not which('gem'):\n logger.warn('Gem not found')\n logger.info('Installing rubygems ...')\n run('gem install rubygems-update && update_rubygems')\n\n # Check if chef is available\n if not which('chef-solo'):\n logger.warn('chef-solo not found')\n logger.info('Installing Chef ...')\n run('gem install chef --no-ri --no-rdoc')\n\n logger.info('All dependencies is met')", "async def validate_test_only_deps(self):\n # Validate that //source doesn't depend on test_only\n queried_source_deps = await self._build_graph.query_external_deps('//source/...')\n expected_test_only_deps = self._dep_info.deps_by_use_category('test_only')\n bad_test_only_deps = expected_test_only_deps.intersection(queried_source_deps)\n if len(bad_test_only_deps) > 0:\n raise DependencyError(\n f'//source depends on test-only dependencies: {bad_test_only_deps}')\n # Validate that //test deps additional to those of //source are captured in\n # test_only.\n marginal_test_deps = await self._build_graph.query_external_deps(\n '//test/...', exclude=['//source/...'])\n bad_test_deps = marginal_test_deps.difference(expected_test_only_deps)\n unknown_bad_test_deps = [dep for dep in bad_test_deps if not test_only_ignore(dep)]\n print(f'Validating {len(expected_test_only_deps)} test-only dependencies...')\n if len(unknown_bad_test_deps) > 0:\n raise DependencyError(\n f'Missing deps in test_only \"use_category\": {unknown_bad_test_deps}')", "def check_dependencies():\n\n # Check for python version\n print(f\"Python location : {sys.executable}\")\n print(f\"Python version : {sys.version}\")\n print(f\"DiPy version : {dipy.__version__}\")\n if sys.version_info[0] < 3:\n print(\n \"WARNING : Using python 2. This Python version is no longer maintained. Use at your own risk.\"\n )\n\n # Check FSL installation\n try:\n print(f\"Your fsl directory is located here: {os.environ['FSLDIR']}\")\n except KeyError:\n raise AssertionError(\n \"You do not have FSL installed! 
See installation instructions here: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FslInstallation\"\n )\n\n # Check AFNI installation\n try:\n print(\n f\"Your AFNI directory is located here: {subprocess.check_output('which afni', shell=True, universal_newlines=True)}\"\n )\n except subprocess.CalledProcessError:\n raise AssertionError(\n \"You do not have AFNI installed! See installation instructions here: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/background_install/main_toc.html\"\n )", "def check_dependencies(self):\n return in_path('luacheck')", "def assert_requirements_exist(self):\n\n for test in self._tests:\n test.assert_requirements_exist()", "def check_dependencies():\n\n with open(get_ros_pkg_path('giskardpy') + '/dependencies.txt') as f:\n dependencies = f.readlines()\n\n dependencies = [x.split('#')[0] for x in dependencies]\n dependencies = [x.strip() for x in dependencies]\n\n for d in dependencies:\n try:\n pkg_resources.require(d)\n except pkg_resources.DistributionNotFound as e:\n rospkg_exists(d)\n except pkg_resources.VersionConflict as e:\n logging.logwarn('found {version_f} but version {version_r} is required'.format(version_r=str(e.req),\n version_f=str(e.dist)))", "def check_dependencies(self):\n return in_path('gjslint') or npm_exists('gjslint')", "def _validate_dependencies_met():\n # Method added in `cryptography==1.1`; not available in older versions\n from cryptography.x509.extensions import Extensions\n if getattr(Extensions, \"get_extension_for_class\", None) is None:\n raise ImportError(\"'cryptography' module missing required functionality. \"\n \"Try upgrading to v1.3.4 or newer.\")\n\n # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509\n # attribute is only present on those versions.\n from OpenSSL.crypto import X509\n x509 = X509()\n if getattr(x509, \"_x509\", None) is None:\n raise ImportError(\"'pyOpenSSL' module missing required functionality. \"\n \"Try upgrading to v0.14 or newer.\")", "def assert_requirements_linked(self):\n\n for test in self._tests:\n test.assert_requirements_linked()", "def check_dependencies(self, analyzers, heuristics):\n return True", "async def validate_control_plane_deps(self):\n # Necessary but not sufficient for controlplane. 
With some refactoring we could\n # probably have more precise tagging of dataplane/controlplane/other deps in\n # these paths.\n queried_controlplane_core_min_deps = await self._build_graph.query_external_deps(\n '//source/common/config/...')\n # Controlplane will always depend on API.\n expected_controlplane_core_deps = self._dep_info.deps_by_use_category('controlplane').union(\n self._dep_info.deps_by_use_category('api'))\n bad_controlplane_core_deps = queried_controlplane_core_min_deps.difference(\n expected_controlplane_core_deps)\n print(f'Validating {len(expected_controlplane_core_deps)} control-plane dependencies...')\n if len(bad_controlplane_core_deps) > 0:\n raise DependencyError(\n f'Observed controlplane core deps {queried_controlplane_core_min_deps} is not covered '\n f'by \"use_category\" implied core deps {expected_controlplane_core_deps}: '\n f'{bad_controlplane_core_deps} are missing')", "def clean_up_requirements(self):\n\n for test in self._tests:\n test.clean_up_requirements()", "def perl_deps_missing():\n global REASON\n try:\n perl.PerlCheck(misc.Options(verbosity=1))\n except SkipOptionalCheck as e:\n REASON = str(e)\n return True\n return False", "def _CheckUnwantedDependencies(input_api, output_api):\n import sys\n # We need to wait until we have an input_api object and use this\n # roundabout construct to import checkdeps because this file is\n # eval-ed and thus doesn't have __file__.\n original_sys_path = sys.path\n try:\n def GenerateCheckdepsPath(base_path):\n return input_api.os_path.join(base_path, 'buildtools', 'checkdeps')\n\n presubmit_path = input_api.PresubmitLocalPath()\n presubmit_parent_path = input_api.os_path.dirname(presubmit_path)\n not_standalone_pdfium = \\\n input_api.os_path.basename(presubmit_parent_path) == \"third_party\" and \\\n input_api.os_path.basename(presubmit_path) == \"pdfium\"\n\n sys.path.append(GenerateCheckdepsPath(presubmit_path))\n if not_standalone_pdfium:\n presubmit_grandparent_path = input_api.os_path.dirname(\n presubmit_parent_path)\n sys.path.append(GenerateCheckdepsPath(presubmit_grandparent_path))\n\n import checkdeps\n from cpp_checker import CppChecker\n from rules import Rule\n except ImportError:\n return [output_api.PresubmitError(\n 'Unable to run checkdeps, does pdfium/buildtools/checkdeps exist?')]\n finally:\n # Restore sys.path to what it was before.\n sys.path = original_sys_path\n\n added_includes = []\n for f in input_api.AffectedFiles():\n if not CppChecker.IsCppFile(f.LocalPath()):\n continue\n\n changed_lines = [line for line_num, line in f.ChangedContents()]\n added_includes.append([f.LocalPath(), changed_lines])\n\n deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())\n\n error_descriptions = []\n warning_descriptions = []\n for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(\n added_includes):\n description_with_path = '%s\\n %s' % (path, rule_description)\n if rule_type == Rule.DISALLOW:\n error_descriptions.append(description_with_path)\n else:\n warning_descriptions.append(description_with_path)\n\n results = []\n if error_descriptions:\n results.append(output_api.PresubmitError(\n 'You added one or more #includes that violate checkdeps rules.',\n error_descriptions))\n if warning_descriptions:\n results.append(output_api.PresubmitPromptOrNotify(\n 'You added one or more #includes of files that are temporarily\\n'\n 'allowed but being removed. Can you avoid introducing the\\n'\n '#include? 
See relevant DEPS file(s) for details and contacts.',\n warning_descriptions))\n return results", "def check_prerequisites(self):\n status_command = 'pip3 --version > /dev/null 2>&1'\n not_found = subprocess.call(\n status_command, **self.subprocess_args(shell=True)) == 127\n if not_found:\n raise base_installer.MissingPrerequisiteError(\n 'The `pip3` command was not found.')", "def run(self):\n if self.state != Check.State.NOT_RUN:\n return\n for dependency_check in self._dependencies:\n dependency_check.run()\n if dependency_check.state != Check.State.OK:\n self.state = Check.State.DEPENCENCY_ERROR\n if self.state == Check.State.DEPENCENCY_ERROR:\n return\n self._errors, self._warnings = self._run(*self._run_args, **self._run_kwargs)\n self.state = Check.State.FAILED if self._errors else Check.State.OK" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if each TSV in data/tsv is present in data/tsv/summary.tsv (and viceversa) as well as if the number of entries in each TSV in data/tsv matches its listed number of entries in data/tsv/summary.tsv. (Basically checks whether generate_summary.py has been run.)
def test_language_data_matches_summary(): name_to_count = {} with open(_TSV_SUMMARY, "r", encoding="utf-8") as lang_summary: for line in lang_summary: language = line.rstrip().split("\t") name_to_count[language[0]] = int(language[-1]) for unique_tsv in os.listdir(_TSV_DIRECTORY): with open( f"{_TSV_DIRECTORY}/{unique_tsv}", "r", encoding="utf-8" ) as tsv: num_of_entries = sum(1 for line in tsv) assert unique_tsv in name_to_count, ( f"{unique_tsv} in data/tsv but not in " "data/tsv_summary.tsv" ) assert name_to_count[unique_tsv] == num_of_entries, ( f"Number of entries in {unique_tsv} does not match " "number of entries in data/tsv_summary.tsv." ) del name_to_count[unique_tsv] assert len(name_to_count) == 0, ( "The following TSVs are listed in data/tsv_summary.tsv " "but could not be found in data/tsv: " f"{[name for name in name_to_count.keys()]}" )
[ "def test_phones_data_matches_summary():\n name_to_count = {}\n with open(_PHONES_SUMMARY, \"r\", encoding=\"utf-8\") as phones_summary:\n for line in phones_summary:\n language = line.rstrip().split(\"\\t\")\n name_to_count[language[0]] = int(language[-1])\n for phones_list in os.listdir(_PHONES_DIRECTORY):\n if phones_list.endswith(\".phones\"):\n with open(\n f\"{_PHONES_DIRECTORY}/{phones_list}\", \"r\", encoding=\"utf-8\"\n ) as tsv:\n # We exclude blank lines and comments.\n num_of_entries = sum(\n 1\n for line in tsv\n if line.strip() and not line.startswith(\"#\")\n )\n assert phones_list in name_to_count, (\n f\"{phones_list} in data/phones but not in \"\n \"data/phones/summary.tsv\"\n )\n assert name_to_count[phones_list] == num_of_entries, (\n f\"Number of entries in {phones_list} does not match \"\n \"number of entries in data/phones/summary.tsv.\"\n )\n del name_to_count[phones_list]\n assert len(name_to_count) == 0, (\n \"The following .phones files are listed in \"\n \"data/phones/summary.tsv but could not be found in \"\n f\"data/phones: {[name for name in name_to_count.keys()]}\"\n )", "def check_summary_type(feat_files, fname_files):\n sum_type = []\n for file in feat_files + fname_files:\n tmp = ''.join(file.stem.split('_')[2:])\n tmp = ''.join([l for l in tmp if not l.isdigit()])\n sum_type.append(tmp)\n if not all(stype == sum_type[0] for stype in sum_type):\n raise ValueError('The summary files specified are not of the same type.')\n return", "def summary_valid(exp_summary_dir):\n exists = check_file_exists(exp_summary_dir, 'summary.json')\n if not exists:\n return False\n summary = read_json(exp_summary_dir, 'summary.json')\n return 'title' in summary and 'value' in summary", "def _data_has_conflict(all_feature_values, conflict_feature_values):\n unique_feature_values = pd.unique(all_feature_values)\n num_of_conflicts = 0\n\n for conflict_feature_value in conflict_feature_values:\n if conflict_feature_value in unique_feature_values:\n num_of_conflicts = num_of_conflicts + 1\n\n return num_of_conflicts > 1", "def check_summary_sentences(summary_sentences, sentence):\n for sum_sentence in summary_sentences:\n in_summary = SENTENCE_COMPARATOR_OBJ.compare_sentences(sum_sentence, sentence, STOPWORDS, tokenized=True)\n\n if in_summary == 1:\n return True\n\n return False", "def is_tabular(self, data):\n lengths = [len(x) for x in data]\n return len(set(lengths))==1", "def check_uniqueness(self):\n for dset_path, incomings in self.paths.items():\n incoming_filenames = [incoming['filename'] for incoming in incomings]\n duplicates = [incoming['is_duplicate'] for incoming in incomings]\n latests = [incoming['latest'] for incoming in incomings]\n roots = [incoming['dset_root'] for incoming in incomings]\n assert latests.count(latests[0]) == len(latests)\n latest_version = latests[0]\n assert roots.count(roots[0]) == len(roots)\n dset_root = roots[0]\n latest_filenames = list()\n for _, _, filenames in os.walk(os.path.join(dset_root, latest_version)):\n latest_filenames.extend(filenames)\n # An upgrade version is different if it contains at least one file with is_duplicate = False\n # And it has the same number of files than the \"latest\" version\n if all(duplicates) and set(latest_filenames) == set(incoming_filenames):\n raise DuplicatedDataset(dset_path, latest_version)", "def check_ms_existence(self):\n\n logger.info(\"-------------------------------------------\")\n logger.info(\"Checking the existence of measurement sets.\")\n 
logger.info(\"-------------------------------------------\")\n\n if self._ms_dict is None:\n return()\n\n found_count = 0\n missing_count = 0\n for target in self._ms_dict.keys():\n for project_tag in self._ms_dict[target].keys():\n for array_tag in self._ms_dict[target][project_tag].keys():\n for obs_tag in self._ms_dict[target][project_tag][array_tag].keys():\n found = False\n local_found_count = 0\n for ms_root in self._ms_roots:\n this_ms = ms_root + self._ms_dict[target][project_tag][array_tag][obs_tag]['file']\n if os.path.isdir(this_ms):\n found = True\n found_count += 1\n local_found_count += 1\n if local_found_count > 1:\n logger.error(\"Found multiple copies of ms for \"+target+\" \"+project_tag+\" \"+array_tag)\n if found:\n continue\n missing_count += 1\n logger.error(\"Missing ms for \"+target+\" \"+project_tag+\" \"+array_tag)\n\n logger.info(\"Verified the existence of \"+str(found_count)+\" measurement sets.\")\n if missing_count == 0:\n logger.info(\"No measurement sets found to be missing.\")\n else:\n logger.error(\"Missing \"+str(missing_count)+\" measurement set key entries.\")\n\n return()", "def test_collate_orf_tpms_and_counts_tsv(expected_fixture, output_dir):\n output_dir_name = os.path.basename(os.path.normpath(output_dir))\n expected_file = os.path.join(expected_fixture, output_dir_name,\n workflow_r.TPMS_ALL_CDS_ALL_SAMPLES_TSV)\n if not os.path.exists(expected_file):\n pytest.skip('Skipped as expected file does not exist')\n utils.equal_tsv(expected_file,\n os.path.join(output_dir, workflow_r.TPMS_ALL_CDS_ALL_SAMPLES_TSV),\n ignore_row_order=True,\n na_to_empty_str=True)", "def close_shortReads_table_is_correct(close_shortReads_table):\n\n # if it is empty return false\n if file_is_empty(close_shortReads_table): return False\n\n # load as df\n close_shortReads_table_df = pd.read_csv(close_shortReads_table, sep=\"\\t\")\n\n # check that all the reads exist\n reads_files = set(close_shortReads_table_df[\"short_reads1\"]).union(close_shortReads_table_df[\"short_reads2\"])\n\n if any([file_is_empty(f) for f in reads_files]): return False\n else: return True", "def is_complete(run, site, fileno, infilename, output_location):\n import ROOT\n # First check to see if all outfiles exist; if not then we can return early\n ads = dets_for(site, run)\n muon_name = os.path.join(output_location, 'muons_{}_{:>04}.root'.format(run, fileno))\n events_names = [os.path.join(\n output_location, 'events_ad{}_{}_{:>04}.root'.format(ad, run, fileno)\n )\n for ad in ads]\n outfiles = [muon_name] + events_names\n for outfile in outfiles:\n if not os.path.isfile(outfile):\n return False\n # Find the timestamp of the last event from the infile\n infile = ROOT.TFile(infilename, 'READ')\n calibStats, adSimple = initialize(infile, 'AdSimpleNL')\n calibStats.AddFriend(adSimple)\n indata = RawFileAdapter(calibStats, run, fileno)\n indata.GetEntry(indata.GetEntries() - 1)\n final_timestamp = indata.timestamp\n infile.Close()\n # Ensure that each outfile has events within 5 seconds of the final timestamp\n TIMESTAMP_CRITERION = 5000000000 # 5e9 ns = 5s\n muonfile = ROOT.TFile(muon_name, 'READ')\n muons = muonfile.Get('muons')\n muons.GetEntry(muons.GetEntries() - 1)\n muons_timestamp = muons.timestamp\n muonfile.Close()\n if abs(final_timestamp - muons_timestamp) > TIMESTAMP_CRITERION:\n return False\n for events_name in events_names:\n eventsfile = ROOT.TFile(events_name, 'READ')\n events = eventsfile.Get('events')\n events.GetEntry(events.GetEntries() - 1)\n events_timestamp = 
events.timestamp\n eventsfile.Close()\n if abs(final_timestamp - events_timestamp) > TIMESTAMP_CRITERION:\n return False\n return True", "def validate_data_files_exists(size, data_path):\n\n # If any file is missing the test will be skipped.\n for i in range(1, size + 1):\n validate_patient_file_exist(filename=CMS_PATIENT, years=YEARS, sample_id=i, data_path=data_path)\n validate_cms_file_exist(filename=CMS_INPATIENT, sample_id=i, data_path=data_path)\n validate_cms_file_exist(filename=CMS_OUTPATIENT, sample_id=i, data_path=data_path)\n validate_claims_file_exist(filename=CMS_CARRIER_CLAIMS, claim_ids=CLAIMS, data_path=data_path)\n validate_cms_file_exist(filename=CMS_DRUG, sample_id=i, data_path=data_path)\n return True", "def check_totals(self, args, data):\r\n totals = self.make_totals(args)\r\n for row, datum in zip(totals, data):\r\n self.assertEqual(row[1:], datum)", "def test_check_files_md5(self):\n table_err = PrettyTable(['File', 'Expected', 'Actual'])\n for file_path, expected_md5 in self.Md5Csum.items():\n actual_md5 = self.hash_md5(file_path)\n if actual_md5 != expected_md5:\n table_err.add_row([file_path, expected_md5, actual_md5])\n continue\n if len(table_err._rows) > 0:\n logger.error(\"Md5sum Check:\\n\".format(table_err))\n raise Exception(\"FAILED: File md5 NOT matched!\")\n return True", "def _check_with_metadata(self, df_list: list, filepaths: list):\n # Assumes df has location field, which only presents a value\n tab_locations = [df.location.unique()[0] for df in df_list]\n counts = pd.value_counts(tab_locations)\n if (counts > 1).any(None):\n duplicated_tabs = counts[counts > 1].index.tolist()\n raise ValueError(f\"Duplicated location(s)! Location(s) {duplicated_tabs} were found in multiple tabs.\")\n # Find missing tabs / missing entries\n metadata_missing = [loc for loc in tab_locations if loc not in self.manual_countries]\n tab_missing = [loc for loc in self.manual_countries if loc not in tab_locations]\n error_msg = []\n if metadata_missing:\n error_msg.append(f\"Tab containing a location missing in LOCATIONS: {str(metadata_missing)}\")\n if tab_missing:\n error_msg.append(f\"Location found in LOCATIONS but no tab with such location was found: \"\n f\"{str(tab_missing)}\")\n if error_msg:\n error_msg = \"\\n\".join(error_msg)\n raise ValueError(error_msg)", "def verify_samples(samples: List[SampleReads]) -> bool:\n\tall_exist = True\n\tfor sample in samples:\n\t\tif not sample.exists():\n\t\t\tlogger.warning(f\"The read files for sample {sample.name} do not exist.\")\n\t\t\tlogger.warning(f\"\\tForward Read: {sample.forward}\")\n\t\t\tlogger.warning(f\"\\tReverse Read: {sample.reverse}\")\n\t\t\tall_exist = False\n\treturn all_exist", "def check_dups(clusts):\n all_clusts = set(clusts)\n trkls,dups = [],0\n for clust in all_clusts:\n if clust=='abstain':\n continue\n flag=0\n for tr in clust.split('|'):\n if tr not in trkls:\n trkls.append(tr)\n else:\n flag=1\n dups+=flag\n return dups", "def check_subgroup_outputs(output_dir, experiment_id, subgroups, file_format=\"csv\"):\n train_preprocessed_file = join(output_dir, f\"{experiment_id}_train_metadata.{file_format}\")\n train_preprocessed = DataReader.read_from_file(train_preprocessed_file, index_col=0)\n\n test_preprocessed_file = join(output_dir, f\"{experiment_id}_test_metadata.{file_format}\")\n test_preprocessed = DataReader.read_from_file(test_preprocessed_file, index_col=0)\n for group in subgroups:\n assert group in train_preprocessed.columns\n assert group in test_preprocessed.columns\n\n # check that 
the total sum of N per category matches the total N\n # in data composition and the total N categories matches what is\n # in overall data composition\n file_data_composition_all = join(output_dir, f\"{experiment_id}_data_composition.{file_format}\")\n df_data_composition_all = DataReader.read_from_file(file_data_composition_all)\n for group in subgroups:\n file_composition_by_group = join(\n output_dir, f\"{experiment_id}_data_composition_by_{group}.{file_format}\"\n )\n composition_by_group = DataReader.read_from_file(file_composition_by_group)\n for partition in [\"Training\", \"Evaluation\"]:\n partition_info = df_data_composition_all.loc[\n df_data_composition_all[\"partition\"] == partition\n ]\n\n summation = sum(composition_by_group[f\"{partition} set\"])\n assert summation == partition_info.iloc[0][\"responses\"]\n\n length = len(composition_by_group.loc[composition_by_group[f\"{partition} set\"] != 0])\n assert length == partition_info.iloc[0][group]", "def in_summary(path):\n stats = get_short_status(path)\n # dirs = os.listdir(path)\n dirs = [f.name for f in os.scandir(path) if f.is_dir()]\n\n if not DIR_JPG in dirs:\n print('Subdirectory missing: ' + DIR_JPG)\n if not DIR_RAW in dirs:\n print('Subdirectory missing: ' + DIR_RAW)\n if not DIR_VIDEO in dirs:\n print('Subdirectory missing: ' + DIR_VIDEO)\n\n print('Files in sub dirs: ' +\n str(len([s for s in stats if s['jpg']])) + ' jpgs, ' +\n str(len([s for s in stats if s['raw']])) + ' raws, ' +\n str(len([s for s in stats if s['video']])) + ' videos.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if each .phones file in data/phones is present in data/phones/summary.tsv and if the number of phones in each .phones file matches its listed number of phones in data/phones/summary.tsv. (Basically checks whether generate_summary.py has been run.)
def test_phones_data_matches_summary(): name_to_count = {} with open(_PHONES_SUMMARY, "r", encoding="utf-8") as phones_summary: for line in phones_summary: language = line.rstrip().split("\t") name_to_count[language[0]] = int(language[-1]) for phones_list in os.listdir(_PHONES_DIRECTORY): if phones_list.endswith(".phones"): with open( f"{_PHONES_DIRECTORY}/{phones_list}", "r", encoding="utf-8" ) as tsv: # We exclude blank lines and comments. num_of_entries = sum( 1 for line in tsv if line.strip() and not line.startswith("#") ) assert phones_list in name_to_count, ( f"{phones_list} in data/phones but not in " "data/phones/summary.tsv" ) assert name_to_count[phones_list] == num_of_entries, ( f"Number of entries in {phones_list} does not match " "number of entries in data/phones/summary.tsv." ) del name_to_count[phones_list] assert len(name_to_count) == 0, ( "The following .phones files are listed in " "data/phones/summary.tsv but could not be found in " f"data/phones: {[name for name in name_to_count.keys()]}" )
[ "def test_language_data_matches_summary():\n name_to_count = {}\n with open(_TSV_SUMMARY, \"r\", encoding=\"utf-8\") as lang_summary:\n for line in lang_summary:\n language = line.rstrip().split(\"\\t\")\n name_to_count[language[0]] = int(language[-1])\n\n for unique_tsv in os.listdir(_TSV_DIRECTORY):\n with open(\n f\"{_TSV_DIRECTORY}/{unique_tsv}\", \"r\", encoding=\"utf-8\"\n ) as tsv:\n num_of_entries = sum(1 for line in tsv)\n assert unique_tsv in name_to_count, (\n f\"{unique_tsv} in data/tsv but not in \" \"data/tsv_summary.tsv\"\n )\n assert name_to_count[unique_tsv] == num_of_entries, (\n f\"Number of entries in {unique_tsv} does not match \"\n \"number of entries in data/tsv_summary.tsv.\"\n )\n del name_to_count[unique_tsv]\n assert len(name_to_count) == 0, (\n \"The following TSVs are listed in data/tsv_summary.tsv \"\n \"but could not be found in data/tsv: \"\n f\"{[name for name in name_to_count.keys()]}\"\n )", "def _count_phones(filepath: str) -> Dict[str, Set[str]]:\n phone_to_examples = collections.defaultdict(set)\n with open(filepath, encoding=\"utf-8\") as source:\n for line in source:\n line = line.strip()\n if not line:\n continue\n word, pron = line.split(\"\\t\", maxsplit=1)\n example = f\"({word} | {pron})\"\n phones = pron.split()\n for phone in phones:\n phone_to_examples[phone].add(example)\n return phone_to_examples", "def check_summary_type(feat_files, fname_files):\n sum_type = []\n for file in feat_files + fname_files:\n tmp = ''.join(file.stem.split('_')[2:])\n tmp = ''.join([l for l in tmp if not l.isdigit()])\n sum_type.append(tmp)\n if not all(stype == sum_type[0] for stype in sum_type):\n raise ValueError('The summary files specified are not of the same type.')\n return", "def check_md5_existence(md5s):\n exists = False\n for md5sum in md5s:\n try:\n md5 = MD5.objects.get(md5sum=md5sum)\n print(f'MD5 {md5sum} already exists for sample {md5.sample_id}')\n exists = md5.sample_id\n except MD5.DoesNotExist:\n print(f'MD5 {md5sum} not found')\n pass\n return exists", "def detect(synonym, file1, file2, num_tuple):\n if not isfile(file1) or not isfile(file2) or not isfile(synonym):\n print(\"Invalid file name, please try again\")\n return\n file1 = open(file1)\n file2 = open(file2)\n file1_list = []\n file2_list = []\n match = 0\n synonyms = {}\n with open(synonym) as synonym:\n for line in synonym:\n syn_line = line.strip().split()\n for words in syn_line:\n synonyms[words] = syn_line\n for line in file1:\n file1_list += set_tuples(line, num_tuple)\n for line in file2:\n file2_list += set_tuples(line, num_tuple)\n for tuple1 in file1_list:\n for tuple2 in file2_list:\n if compare_tuples(tuple1, tuple2, synonyms):\n match += 1\n break\n print(\"{0:.2f}%\".format(match/(max(len(file1_list), len(file2_list))) * 100))", "def check_ms_existence(self):\n\n logger.info(\"-------------------------------------------\")\n logger.info(\"Checking the existence of measurement sets.\")\n logger.info(\"-------------------------------------------\")\n\n if self._ms_dict is None:\n return()\n\n found_count = 0\n missing_count = 0\n for target in self._ms_dict.keys():\n for project_tag in self._ms_dict[target].keys():\n for array_tag in self._ms_dict[target][project_tag].keys():\n for obs_tag in self._ms_dict[target][project_tag][array_tag].keys():\n found = False\n local_found_count = 0\n for ms_root in self._ms_roots:\n this_ms = ms_root + self._ms_dict[target][project_tag][array_tag][obs_tag]['file']\n if os.path.isdir(this_ms):\n found = True\n found_count += 1\n 
local_found_count += 1\n if local_found_count > 1:\n logger.error(\"Found multiple copies of ms for \"+target+\" \"+project_tag+\" \"+array_tag)\n if found:\n continue\n missing_count += 1\n logger.error(\"Missing ms for \"+target+\" \"+project_tag+\" \"+array_tag)\n\n logger.info(\"Verified the existence of \"+str(found_count)+\" measurement sets.\")\n if missing_count == 0:\n logger.info(\"No measurement sets found to be missing.\")\n else:\n logger.error(\"Missing \"+str(missing_count)+\" measurement set key entries.\")\n\n return()", "def _check_ipa_phonemes(phone_to_examples: Dict[str, Set[str]], filepath: str):\n bad_ipa_phonemes = frozenset(\n phone\n for phone in phone_to_examples.keys()\n if not (\n ipapy.is_valid_ipa(unicodedata.normalize(\"NFD\", phone))\n or phone in OTHER_VALID_IPA\n )\n )\n if len(bad_ipa_phonemes) and filepath.endswith(\"broad.tsv\"):\n logging.warning(\"Found %d invalid IPA phones:\", len(bad_ipa_phonemes))\n phoneme_id = 1\n for phoneme in bad_ipa_phonemes:\n bad_chars = [\n f\"[%d %04x %s %s]\"\n % (i, ord(c), unicodedata.category(c), unicodedata.name(c))\n for i, c in enumerate(ipapy.invalid_ipa_characters(phoneme))\n ]\n logging.warning(\n \"[%d] Non-IPA transcription: %s (%s)\",\n phoneme_id,\n phoneme,\n \" \".join(bad_chars),\n )\n phoneme_id += 1", "def analyze_minerals():\n datapath = os.path.join(DATA_DIR, 'minerals.json')\n with open(datapath) as datafile:\n\n fields = defaultdict(dict)\n valuesets = defaultdict(set)\n occurences = defaultdict(int)\n\n mineralsjson = json.load(datafile)\n\n for mineral in mineralsjson:\n for key, value in mineral.items():\n if value != '':\n occurences[key] += 1\n valuesets[key].add(value)\n if 'length' in fields[key].keys():\n if len(value) < fields[key]['length']:\n continue\n fields[key]['length'] = len(value)\n fields[key]['example'] = value\n\n with open('data_details.txt', 'w') as resultfile:\n for key in sorted(occurences,\n key=occurences.get,\n reverse=True):\n resultfile.write(\n (\"{4}\\nField: {0:25s}\\n{4}\\noccurence: #{1:3d}, \"\n \"max_length: {2:3d} \\nValues: {3}\\n\")\n .format(\n key,\n occurences[key],\n fields[key]['length'],\n valuesets[key],\n 80 * '-',\n )\n )\n\n with open('data_summary.txt', 'w') as resultfile:\n resultfile.write(\"{0:25s}|{1:15s}|{2:15s}|{3:15s}\\n\".format(\n 'Fieldname',\n 'occurence count',\n 'distinct count',\n 'max length',\n ))\n resultfile.write(\"{0:25s}|{1:15s}|{1:15s}|{1:15s}\\n\".format(\n 25 * '-',\n 15 * '-',\n ))\n for key in sorted(occurences, key=occurences.get,\n reverse=True):\n\n resultfile.write(\"{0:25s}|{1:15d}|{2:15d}|{3:15d}\\n\".format(\n key,\n occurences[key],\n len(valuesets[key]),\n fields[key]['length'],\n ))", "def check_uniqueness(self):\n for dset_path, incomings in self.paths.items():\n incoming_filenames = [incoming['filename'] for incoming in incomings]\n duplicates = [incoming['is_duplicate'] for incoming in incomings]\n latests = [incoming['latest'] for incoming in incomings]\n roots = [incoming['dset_root'] for incoming in incomings]\n assert latests.count(latests[0]) == len(latests)\n latest_version = latests[0]\n assert roots.count(roots[0]) == len(roots)\n dset_root = roots[0]\n latest_filenames = list()\n for _, _, filenames in os.walk(os.path.join(dset_root, latest_version)):\n latest_filenames.extend(filenames)\n # An upgrade version is different if it contains at least one file with is_duplicate = False\n # And it has the same number of files than the \"latest\" version\n if all(duplicates) and set(latest_filenames) == 
set(incoming_filenames):\n raise DuplicatedDataset(dset_path, latest_version)", "def check_equal_docnumbers(input_file1, input_file2):\n\n input_file1 = value_checkup(input_file1)\n input_file2 = value_checkup(input_file2)\n\n if input_file1 == '' or input_file2 == '':\n return False\n\n if os.path.exists(input_file1) and os.path.exists(input_file2):\n nlines_1 = sum(1 for line in open(input_file1))\n nlines_2 = sum(1 for line in open(input_file2))\n return nlines_1 == nlines_2\n\n return False", "def validate_data_files_exists(size, data_path):\n\n # If any file is missing the test will be skipped.\n for i in range(1, size + 1):\n validate_patient_file_exist(filename=CMS_PATIENT, years=YEARS, sample_id=i, data_path=data_path)\n validate_cms_file_exist(filename=CMS_INPATIENT, sample_id=i, data_path=data_path)\n validate_cms_file_exist(filename=CMS_OUTPATIENT, sample_id=i, data_path=data_path)\n validate_claims_file_exist(filename=CMS_CARRIER_CLAIMS, claim_ids=CLAIMS, data_path=data_path)\n validate_cms_file_exist(filename=CMS_DRUG, sample_id=i, data_path=data_path)\n return True", "def verify_samples(samples: List[SampleReads]) -> bool:\n\tall_exist = True\n\tfor sample in samples:\n\t\tif not sample.exists():\n\t\t\tlogger.warning(f\"The read files for sample {sample.name} do not exist.\")\n\t\t\tlogger.warning(f\"\\tForward Read: {sample.forward}\")\n\t\t\tlogger.warning(f\"\\tReverse Read: {sample.reverse}\")\n\t\t\tall_exist = False\n\treturn all_exist", "def validate_pairing_file(pairing_file, tumor_samples, normal_samples):\n for i, tn_pair in pairing_file.iterrows():\n tumor_id = tn_pair['tumor_id']\n normal_id = tn_pair['normal_id']\n assert tumor_id, 'Missing tumor sample ID in pairing file'\n\n # Find the path to the bam that contains this tumor sample ID\n tumor_sample = filter(lambda t: tumor_id in t , tumor_samples)\n assert len(tumor_sample) == 1, 'Incorrect # of matches for tumor sample {}'.format(tumor_sample)\n\n if normal_id and normal_id != '':\n normal_sample = filter(lambda n: normal_id in n, normal_samples)\n assert len(normal_sample) == 1, 'Incorrect # of matches ({}) for paired normal for tumor sample {}'.format(len(normal_sample), tumor_sample)", "def test_counts(self):\n from mirtop.libs import logger\n from mirtop.gff.convert import convert_gff_counts\n import argparse\n\n logger.initialize_logger(\"test counts\", True, True)\n logger = logger.getLogger(__name__)\n\n args = argparse.Namespace()\n args.hairpin = \"data/examples/annotate/hairpin.fa\"\n args.sps = \"hsa\"\n args.gtf = \"data/examples/annotate/hsa.gff3\"\n args.gff = 'data/examples/synthetic/let7a-5p.gff'\n args.out = 'data/examples/synthetic'\n args.add_extra = True\n convert_gff_counts(args)\n os.remove(os.path.join(args.out, \"let7a-5p.tsv\"))\n\n return True", "def check_existence(self):\n\n if self.quickTest:\n susuffix = \"QT\"\n else:\n susuffix = \"\"\n\n if self.disable_FM:\n presuffix = \"no\"\n else:\n presuffix = \"\"\n\n file_exist = True\n for nmbasis in self.numbasis:\n suffix1 = presuffix+\"FMMF-KL{0}\".format(nmbasis)+susuffix\n file_exist= file_exist and (len(glob(self.outputDir+os.path.sep+self.folderName+os.path.sep+self.prefix+'-'+suffix1+'.fits')) >= 1)\n\n if file_exist and not self.mute:\n print(\"Output already exist.\")\n\n return file_exist and not self.overwrite", "def checkDupplicates(master: List[ndarray], names: List[str] = None) -> None:\n \n if (names is None) or (len(names) != len(master)):\n try:\n len(names) != len(master)\n print(\"Given names were not enough. 
Using position in the list as name instead.\")\n except TypeError:\n pass\n names = np.char.array(['catalog nb ']*len(master)) + np.char.array(np.array(range(len(master)), dtype='str'))\n \n for catalog, nameCat in zip(master, names):\n cnt = True\n for ra, dec, nb in zip(catalog['RA'], catalog['DEC'], range(catalog['RA'].shape[0])):\n \n where1 = np.where(catalog['RA']==ra)[0]\n where2 = np.where(catalog['DEC']==dec)[0]\n \n if (len(where1)>1) and (len(where2)>1):\n \n flag = True\n for w in where2:\n \n if flag and (w in where1):\n print(\"RA =\", ra, \"deg and DEC =\", dec, \"deg galaxy (line \" + str(nb) + \") is present more than once in catalog\", nameCat)\n flag = False\n cnt = False\n if cnt:\n print(\"All the galaxies are only listed once in the catalog\", nameCat) \n return", "def check_files(collection):\n print(\"Collection '\" + collection.name + \"'\")\n \n count = collection.count()\n print(\"No. files: {0}\".format(count))\n\n count_per_study = collection.aggregate([\n { '$project' : { '_id' : 0, 'sid' : 1, 'sname' : 1 } },\n { '$group' : { \n '_id' : { 'study id' : \"$sid\", 'study name' : \"$sname\" },\n 'num sources' : { '$sum' : 1 }\n }\n }\n ])\n print(\"No. files per study:\")\n for c in count_per_study['result']:\n print(\"{0} - {1} files \\t({2})\".format(c['_id']['study id'], c['num sources'], c['_id']['study name']))\n\n indexes = collection.index_information()\n print(\"No. indexes: {0}\".format(len(indexes)))\n print(\"Indexes: {0}\".format(indexes.keys()))", "def _run_successful(sample):\n # TODO Implement more thoroughly than just checking if file is empty\n return os.stat(sample.mature_readcount).st_size >= 0 and os.stat(sample.hairpin_readcount).st_size >= 0", "def check(dir_main,prefix,keyphrase,subdir_length,subdir):\n \n #CLEAR ARRAYS \n Entries=[]\n Number = [] \n \n #ITERATE THROUGH ALL SUBDIRECTORIES\n for j in range(subdir_length):\n \n #DEFINE PATH OF SPECIFIC DIRECTORY\n dir_out = dir_main+subdir[j]+'/'+prefix+'/'\n path = dir_out\n files = dir_out+'*.out'\n \n #IF THIS PATH ACTUALLY CONTAINS A RELAX, STATIC, OR DFPT DIR\n if os.path.exists(dir_out):\n \n #LIST ALL .OUT FILES AS ARRAY\n name = glob.glob(files)\n name = np.asanyarray(name)\n \n #IF THERE ARE SLURM FILES, LOOP THROUGH AND SEARCH FOR KEYWORD\n if len(name)!=0:\n Number = np.append(Number,len(name))\n num_vec = []\n \n #PULL ID NUMBER FOR ALL .OUT FILES CONTAINED IN DIRECTORY\n for k in range(len(name)):\n name2 = name[k]\n num = int(name2[-11:-4])\n num_vec = np.append(num_vec,num)\n \n #FIND .OUT FILE WITH MAX NUMBER (MOST RECENT NUMBER) AND READ AS STRING\n m = max(num_vec)\n position = [i for i, j in enumerate(num_vec) if j == m]\n str_output = os.popen('grep \"'+ keyphrase +'\" '+name[position][0])\n string = str_output.read()\n \n #IF KEYPHRASE EXISTS FROM GREP - THEN IT HAS CONVERGED\n if string:\n Entries=np.append(Entries,' Y ')\n else:\n Entries=np.append(Entries,' N ')\n #OUTPUT FILES NOT FOUND \n else:\n Entries=np.append(Entries,' DNR ')\n Number=np.append(Number,0)\n else:\n Entries=np.append(Entries,'DNR')\n Number=np.append(Number,0)\n \n return Entries,Number" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Task to flag all bonds that have passed maturity as matured. Also creates a rating decision for each matured bond.
def mature_debt():
    logger.info("Flagging bonds as matured.")

    for i in Issue.objects.matures_today():

        # Step 1, flag the bond as matured
        i.is_matured = True
        i.save()

        try:
            # See if there is an existing rating decision for this issue
            # Current rating
            ci = IssueDecision.objects.get(
                issue=i,
                is_current=True,
            )

            # Set current rating decision to not current
            ci.is_current = False
            ci.save()

            # Create a decision with rating 'NR' which means withdrawn
            # rating
            d = IssueDecision.objects.create(
                previous_rating=ci,
                rating_decision_issue=ci.rating_decision_issue,
                issue=i,
                is_current=True,
                decided_lt=200,
                date_time_committee=timezone.now(),
                date_time_communicated_issuer=timezone.now() + timedelta(
                    minutes=1),
                date_time_published=timezone.now() + timedelta(
                    minutes=2),

                # Default the decision to the same people who made the initial
                # decision
                # TODO: consider how to handle this in the long run
                # it might be that a person has left the company
                chair=ci.chair,
                proposed_by=ci.proposed_by,

                rationale='Automatic system insert due to matured issue.',
                process_step=10,
            )

            # Has to be invoked like this rather than signal
            # as the order of signalled cannot be easily controlled
            refresh_issue_decision_attributes(d)

            # Send an email to issuer with the decision
            to_list = [
                i.issuer.analyst.primary_analyst.email,
            ]

            cc_list = [
                i.issuer.analyst.primary_analyst.email,
                i.issuer.analyst.secondary_analyst.email,
                i.issuer.relationship_manager.email,
            ]

            header = ISSUE_WR_DECISION_HEADER.format(i.isin)
            body = ISSUE_WR_DECISION_BODY.format(i.isin)

            # Send notification
            send_email.delay(
                header=header,
                body=body,
                to=to_list,
                cc=cc_list,
            )

        except IssueDecision.DoesNotExist:
            # This issue has not been rated, do nothing
            pass
[ "def __AcceptAllMandates(self, user):\n if self._mandates:\n for mandate in self._mandates:\n if mandate.IsAcceptedByTrader(user) is False:\n mandate.AddAcceptedTrader(user)\n mandate.Commit()\n getLogger().debug('Accepted mandate (%s) .' % mandate.Name())\n else:\n getLogger().debug('Mandate has previously been accepted (%s)' % mandate.Name())", "def _bonus(self, animal):\n\n # Check card bonuses\n if animal in self._bonuses:\n bonus = self._bonuses[animal].pop(0)\n self._log.info(f\"Applying bonus of {bonus} for animal {animal}.\")\n\n for player in self._players.values():\n player.budget += bonus", "def test_apply_action_gain_radical(self):\n action = ['GAIN_RADICAL', '*1', 1]\n for order0 in self.orderList:\n bond0 = Bond(None, None, order=order0)\n bond = bond0.copy()\n try:\n bond.apply_action(action)\n self.fail('Bond.apply_action() unexpectedly processed a GAIN_RADICAL action '\n 'with order {0}.'.format(order0))\n except ActionError:\n pass", "def bonus(self, participant):\n\n # Get only the \"info\" from target participant's nodes.\n all_nodes = participant.nodes()\n experiment_nodes = [n for n in all_nodes\n if n.network.role == \"experiment\"]\n nested_infos = [n.infos() for n in experiment_nodes]\n flattened_infos = [info_item for info_list in nested_infos\n for info_item in info_list]\n\n # Grab their final accuracy scores.\n score = [float(info.property3) for info\n in flattened_infos] # get the accuracy of the infos\n\n # If they timed out, give them no bonuses.\n if -9999999999999999999999999999 in score:\n bonus = 0.0\n\n # Otherwise, grant them appropriate bonuses\n else:\n score = [trial_bonus if trial_bonus > 0.0 else 0.0 for trial_bonus in score ]\n mean_accuracy = float(sum(score))/float(self.total_test_trials)\n bonus = round(min((self.accuracy_bonus_payment +\n self.completion_bonus_payment),\n max(0.0, ((mean_accuracy *\n self.accuracy_bonus_payment) +\n self.completion_bonus_payment))),\n 2)\n return bonus", "def affinity_bonus(self, const, aff_number):\n if 'Crossroads' in self.name and self not in unlocked_stars:\n const.members[0].bonus_type[0].add_points(const.members[0].bonus_value[0])\n # Confirm that all stars of a constellation has been unlocked and that the constellation grants some bonus\n # to affinities.\n elif const.check_full() and const.affinity_bonus:\n for i in range(0, aff_number):\n const.members[0].bonus_type[i].add_points(const.members[0].bonus_value[i])\n return True", "def updateResponsiveness(self):\n\n def injury(self):\n # 1 / 200 chance of 'break down'\n # if random.randint(1, 5000) == 666: return True\n # else: return False\n return False\n\n\n def runningStyleImpact(self, c):\n sortedComps = sorted(self.competitors, key = operator.attrgetter('distance'))\n topThird = []\n middleThird = []\n bottomThird = []\n topRange = (int((NUM_OF_COMPETITORS / 3) * 2), NUM_OF_COMPETITORS)\n middleRange = (int((NUM_OF_COMPETITORS / 3)), int((NUM_OF_COMPETITORS / 3) * 2))\n bottomRange = (0, int((NUM_OF_COMPETITORS / 3)))\n for i in range(bottomRange[0], bottomRange[1]): bottomThird.append(sortedComps[i])\n for i in range(middleRange[0], middleRange[1]): middleThird.append(sortedComps[i])\n for i in range(topRange[0], topRange[1]): topThird.append(sortedComps[i])\n if self.raceSplit['start'][0] <= c.distance <= self.raceSplit['start'][1]:\n if c in topThird:\n if c.running_style == \"frontrunner\" and c.id not in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness * random.gauss(1.2, 0.05)\n 
self.runningStyleImpactChanged.append(c.id)\n if self.raceSplit['middle'][0] <= c.distance <= self.raceSplit['middle'][1]:\n if c in middleThird:\n if c.running_style == \"stalker\" and c.id not in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness * random.gauss(1.2, 0.05)\n self.runningStyleImpactChanged.append(c.id)\n if c.running_style == \"frontrunner\" and c.id in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness / random.gauss(1.2, 0.05)\n self.runningStyleImpactChanged.remove(c.id)\n if self.raceSplit['end'][0] <= c.distance <= self.raceSplit['end'][1]:\n if c in bottomThird or c in middleThird:\n if c.running_style == \"closer\" and c.id not in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness * random.gauss(1.1, 0.05)\n self.runningStyleImpactChanged.append(c.id)\n if c.running_style == \"stalker\" and c.id in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness / random.gauss(1.1, 0.05)\n self.runningStyleImpactChanged.remove(c.id)\n\n\n def finalStretch(self, c):\n if c.id in self.finished: return\n if c.distance >= self.race_attributes.length - self.finalStretchDist[self.race_attributes.race_type] and c.id not in self.finalStretchIncreases:\n # in final stretch\n distanceLeft = float(self.race_attributes.length - c.distance)\n energyLeft = int(c.energy / distanceLeft)\n buildUp = energyLeft / distanceLeft\n # multiply buildUp by 2 for more dramatic race events\n self.finalStretchIncreases[c.id] = buildUp * 3\n\n if c.id in self.finalStretchIncreases:\n c.responsiveness = c.responsiveness + self.finalStretchIncreases[c.id]\n\n # if race is long then competitors should have lower responsiveness at start and middle with burst at end\n # if race is short then competitors should have resonably consistent responsiveness throughout\n for c in self.competitors:\n if c in self.injuredCompetitors or c.id in self.finished:\n continue\n if injury(self) == True:\n c.responsiveness = 0\n self.injuredCompetitors.append(c)\n runningStyleImpact(self, c)\n finalStretch(self, c)", "def assign_validity_of_bonds(debt_details):\n debt_details['Valid'] = True\n\n # Set the value of Valid for the matching rows to False\n debt_details.loc[\n pd.isnull(debt_details['Coupon']) |\n pd.isnull(debt_details['Maturity (years)']) |\n (debt_details['Maturity (years)'] < 0),\n 'Valid'\n ] = False", "def set_donors_acceptors(self):\n for residue in self.residues:\n residue.set_donors_acceptors()", "def _disburse( self, amount ):\n for x in self.active:\n for y in x:\n c, source_classifiers = y\n c.pay( amount / float(len(self.active)) )\n for z in source_classifiers:\n z.pay( amount / float(len(source_classifiers)) )", "def test_update_bonus_pool_good(self):\n ad_rep = AD_REP_FACTORY.create_ad_rep()\n self.assertEqual(ad_rep.consumer_points, 0)\n flyer = Flyer.objects.create(site_id=2, is_approved=True)\n consumer_ids = []\n consumer_emails = []\n for consumer_x in range(5):\n email = 'test_update_bonus_pool_%s@example.com' % consumer_x\n consumer = Consumer.objects.create(site_id=2, email=email,\n username=email)\n consumer_ids.append(consumer.id)\n consumer_emails.append(email)\n # Five consumers to receive this flyer\n # Two who receive this flyer will be related to this ad_rep.\n # A third consumer of this ad rep does not receive the flyer.\n for consumer_id in consumer_ids[3:6]:\n AdRepConsumer.objects.create(ad_rep=ad_rep, consumer_id=consumer_id)\n UPDATE_CONSUMER_BONUS_POOL(flyer.id, consumer_emails)\n try:\n bonus_pool_flyer = 
BonusPoolFlyer.objects.get(flyer__id=flyer.id)\n except BonusPoolFlyer.DoesNotExist:\n self.fail(\"Bonus pool flyer not created.\")\n # Bonus pool accumulates two point for his consumers that receive this \n # flyer.\n self.assertEqual(bonus_pool_flyer.calculate_status, '2')\n ad_rep = AdRep.objects.get(id=ad_rep.id)\n self.assertNotEqual(ad_rep.consumer_points, 0)", "def _reward_finisher(self, t):\n self._rewardlock.acquire()\n self._juice_drip(t)\n self._rewardlock.release()", "def unlock_till_star(self, st):\n\n counter = 0\n for member in self.members:\n if member.line == st.line or member.line is None and member.rank <= st.rank:\n member.unlock()\n member.add_bonus()\n counter += 1\n # Set minimum value of a particular affinity type points below which the program cannot go so that all\n # stars remain in unlocked state.\n if st in unlocked_stars:\n for aff in self.requirement:\n eval(aff).set_minimum(self.requirement[aff])\n break\n # Add affinity bonus only if all stars of a constellation have been unlocked.\n if counter == len(self.members):\n self.add_affinity_bonus()\n return st in unlocked_stars", "def _AllMandatesAccepted(self, user):\n if self._mandates:\n for mandate in self._mandates:\n if mandate.IsAcceptedByTrader(user) is False:\n return False\n return True", "def susceptible(g, agent, belief):\n############ Changes ############\n q = g.nodes[agent]['q'] # probability of not getting adopted\n try:\n if nx.shortest_path_length(g.nodes[agent]['M'], *belief) <= 2:\n q *= triangle_sensitivity\n except (nx.NetworkXNoPath, nx.NodeNotFound):\n # no path exists between the nodes\n pass\n\n familiarity = sum([v for k,v in g.nodes[agent]['M'].degree(belief)])\n q *= familiarity_sensitivity**familiarity\n\n adopt = np.random.binomial(1, p=1-q) == 1\n#################\n return adopt", "def calculate_stability(self):\n\t\tself.stability = 0\n\n\t\tfor i in range(len(self.other_bonds)):\n\t\t\tself.other_bonds[i].determine_value()\n\t\t\tself.stability = self.stability + self.other_bonds[i].value\n\n\t\t# print \"stability of this fold: \", self.stability", "def compute_reward(self, obs, action):\n pass", "def zombie_has_enough_meals(donatorMeals,donatorUpgrade,mealsToDonate):\n\tthresholds = {'n' : 4, 'N' : 7, 'D' : 10, 'E' : 8}\n\tif donatorUpgrade in thresholds:\n\t\tminimumAmount = thresholds[donatorUpgrade]\n\telse :\n\t\tminimumAmount = 0\n\tif donatorMeals - mealsToDonate < minimumAmount:\n\t\treturn False\n\telse :\n\t\treturn True", "def test_apply_action_lose_radical(self):\n action = ['LOSE_RADICAL', '*1', 1]\n for order0 in self.orderList:\n bond0 = Bond(None, None, order=order0)\n bond = bond0.copy()\n try:\n bond.apply_action(action)\n self.fail('Bond.apply_action() unexpectedly processed a LOSE_RADICAL action '\n 'with order {0}.'.format(order0))\n except ActionError:\n pass", "def mm_optimise(self, molecule):\n\n # Check which method we want then do the optimisation\n if self.args.mm_opt_method == 'openmm':\n # Make the inputs\n molecule.write_pdb(name='openmm', input_type='input')\n molecule.write_parameters(name='state')\n # Run geometric\n system('geometric-optimize --reset --epsilon 0.0 --maxiter 500 --qccnv --pdb openmm.pdb --openmm state.xml > log.xt')\n # Get the optimised structure store under mm\n molecule.read_xyz(input_type='mm')\n\n else:\n # Run an rdkit optimisation with the right FF\n rdkit_ff = {'rdkit_mff': 'MFF', 'rdkit_uff': 'UFF'}\n molecule.filename = smiles_mm_optimise(molecule.filename, ff=rdkit_ff[self.args.mm_opt_method])\n\n 
append_to_log(f'Optimised the molecule with {self.args.mm_opt_method}')\n\n return molecule" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recount familiar movies | persons for user
def recount_familiar_objects(content_type_id, user_id):
    model = ContentType.objects.get_for_id(content_type_id).model_class()
    user = User.objects.get(pk=user_id)

    count = UserRelationCount.objects.get_or_create(object=user)[0]

    if model == MovieRelation:
        count.movies = user.familiar_movies.count()
    elif model == PersonRelation:
        count.persons = user.familiar_persons.count()
    count.save()
[ "def _count_movies(request):\n user_ratings_train = _process_data[\"user_ratings_train\"]\n movie_counts = {} # {movie id: count}\n\n # go through \"user_ratings_train\" to build up \"movie_counts\"\n for user_id, movie_ratings in user_ratings_train:\n for movie_id, rating in movie_ratings:\n if movie_id not in movie_counts:\n movie_counts[movie_id] = 0\n\n movie_counts[movie_id] += 1\n\n _process_data[\"movie_counts\"] = movie_counts", "def get_number_of_movies(self) -> int:\r\n raise NotImplementedError", "def movie_count(self):\r\n total_movie_count = 0\r\n for line in self.movies:\r\n total_movie_count += 1\r\n return total_movie_count", "def ranking_actors_performed():\n reader = initialize_reader()\n names_list = [row[10] for row in reader] \n names_for = list(names_list)\n names = []\n for name in names_for:\n if {\"name_actor\": name, \"movies_performed\": names_for.count(name)} not in names:\n names.append({\"name_actor\": name, \"movies_performed\": names_for.count(name)})\n else:\n names_for.remove(name)\n new_list = sorted(names, key=lambda i: i['movies_performed'], reverse=True)\n ranking_ten_list = new_list[:10]\n rank = 0\n print(\"\\nRanking actors Number of movies where the actor performed \\n\")\n for actor in ranking_ten_list:\n rank = rank + 1\n print(f\"Rank {rank} is {actor.get('name_actor')} with {actor.get('movies_performed')}\")", "def default_movies_for_user(userscore, services, num_movies, watched_movies):\n \n movies = []\n alreadyseen = []\n total = 0\n genrescore = userscore.copy()\n for genre in genrescore:\n total += genrescore[genre]\n\n for genre in genrescore:\n genrescore[genre] = genrescore[genre] / total\n\n for genre in genrescore:\n genrescore[genre] = math.ceil(genrescore[genre] * num_movies)\n\n moviessofar = 0\n services_string = '|'.join(services)\n watchprovidersstring = \"&with_watch_providers=\" + services_string + \"&watch_region=US\"\n if services == []:\n watchprovidersstring = ''\n page = 1\n response = requests.get(\"https://api.themoviedb.org/3/discover/movie?api_key=\" + tmdb_api_key +\n \"&language=en-US&region=US&sort_by=popularity.desc&include_adult=false&include_video=false&page=\" + str(page) +\n watchprovidersstring + \"&with_watch_monetization_types=flatrate\")\n data = response.json()['results']\n\n for genre in genrescore:\n while moviessofar < genrescore[genre]:\n for result in data:\n if result['title'] not in alreadyseen and (str(result['id']) not in watched_movies) and moviessofar < genrescore[genre] and str(genre) in str(result['genre_ids']):\n movie = {}\n movie['id'] = result['id']\n movie['title'] = result['title']\n movie['genre_ids'] = result['genre_ids']\n movie['image'] = 'https://image.tmdb.org/t/p/w500' + result['poster_path']\n sources = sources_from_tmdbID(movie['id'])\n if sources != 'None':\n sources_with_service = [sources[x] for x in sources if str(sources[x]) in services] \n movie['source'] = sources_with_service\n movies.append(movie)\n alreadyseen.append(result['title'])\n moviessofar += 1\n page += 1\n if moviessofar < genrescore[genre]:\n response = requests.get(\"https://api.themoviedb.org/3/discover/movie?api_key=\" + tmdb_api_key +\n \"&language=en-US&region=US&sort_by=popularity.desc&include_adult=false&include_video=false&page=\" + str(page) +\n watchprovidersstring + \"&with_watch_monetization_types=flatrate\")\n data = response.json()['results']\n moviessofar = 0\n\n random.shuffle(movies)\n if len(movies) - num_movies > 0:\n return movies[:-(len(movies) - num_movies)]\n return movies", "def 
recommend_movies(target_rating: Rating,\n movies: MovieDict, \n user_ratings: UserRatingDict,\n movie_users: MovieUserDict,\n num_movies: int) -> List[int]:\n\n # Your code here\n \n movie_score = {}\n \n ## First step = 'we will need to find users similar'\n similar_user = get_similar_users(target_rating, user_ratings, movie_users) \n \n ## Second step = 'This will be our list of candidate movies'\n ## get_candidate_mov created\n candidate_mov = get_candidate_mov(similar_user, user_ratings, target_rating)\n \n ## Third step = 'track a \"score\" for each movie'\n ## get_mov_score created\n for mov in candidate_mov:\n movie_score[mov] = get_mov_score(mov, \n user_ratings, \n similar_user, \n candidate_mov) \n \n ## Forth step = 'The return list should contain movie ids with the highest scores'\n ## sort_score_list created\n sorted_list = sort_score_list(movie_score)\n \n ## Last step = ' list should be no longer than the value of this parameter'\n final_list = sorted_list[:num_movies]\n \n return final_list", "def _increment_reviewer_counts(self):\n from reviewboard.accounts.models import LocalSiteProfile\n\n groups = list(self.target_groups.values_list('pk', flat=True))\n people = list(self.target_people.values_list('pk', flat=True))\n\n Group.incoming_request_count.increment(self.target_groups.all())\n LocalSiteProfile.direct_incoming_request_count.increment(\n LocalSiteProfile.objects.filter(user__in=people,\n local_site=self.local_site))\n LocalSiteProfile.total_incoming_request_count.increment(\n LocalSiteProfile.objects.filter(\n Q(local_site=self.local_site) &\n Q(Q(user__review_groups__in=groups) |\n Q(user__in=people))))\n LocalSiteProfile.starred_public_request_count.increment(\n LocalSiteProfile.objects.filter(\n profile__starred_review_requests=self,\n local_site=self.local_site))", "def show_recipes_count_per_user(value):\n return Recipe.objects.filter(author=value).count()", "def movie_watched_count(self):\r\n watched_count = 0\r\n for line in self.movies:\r\n if line.is_watched:\r\n watched_count += 1\r\n return watched_count", "def create_user_movie_rating(n_movies=1682):\n ratings = np.zeros((n_movies, 1))\n ratings[0] = 4\n ratings[6] = 3\n ratings[11] = 5\n ratings[53] = 4\n ratings[63] = 5\n ratings[65] = 3\n ratings[68] = 5\n ratings[97] = 2\n ratings[182] = 4\n ratings[225] = 5\n ratings[354] = 5\n return ratings", "def movies_to_users(user_ratings: UserRatingDict) -> MovieUserDict:\n\n # Your code here\n mov_list = []\n mov_to_p = {}\n for p in user_ratings:\n for mov in user_ratings[p]:\n if mov not in mov_list:\n mov_list.append(mov)\n for mov in mov_list:\n mov_to_p[mov] = []\n for p in user_ratings:\n if mov in user_ratings[p]:\n mov_to_p[mov].append(p)\n mov_to_p[mov].sort()\n return mov_to_p", "def output_shape(filtered_recs, movies_user_has_seen, num_recs=3):\n counter = 0\n recommendations = {}\n for key, value in filtered_recs.items():\n if counter >= num_recs:\n break\n else:\n if key not in movies_user_has_seen:\n print(value)\n recommendations[int(key)] = {\"title\": value}\n counter += 1\n else:\n pass\n return recommendations", "def _actors_count(actor: str, total_actors: List) -> List:\n return [actor, total_actors.count(actor)]", "def num_voters(self):\n return User.objects.filter(votes__option__topic=self).distinct().count()", "def reduce_users_and_items(ratings, users, items, threshold=100):\n\n user_counts = ratings['user_id'].value_counts()\n ratings_v2 = ratings.loc[ratings['user_id'].isin(user_counts[user_counts >= threshold].index)]\n users_v2 = 
users.loc[users['user_id'].isin(user_counts[user_counts >= threshold].index)]\n\n item_counts = ratings['movie_id'].value_counts()\n ratings_v2 = ratings_v2.loc[ratings['movie_id'].isin(item_counts[item_counts >= threshold].index)]\n items_v2 = items.loc[items['movie_id'].isin(item_counts[item_counts >= threshold].index)]\n\n analyze_and_plot_data(ratings_v2)\n\n\n new_movie_id = np.arange(1, items_v2.shape[0]+1)\n new_movie = pd.Series(new_movie_id).rename('new_id')\n items_v2 = items_v2.assign(new_id=new_movie.values)\n\n new_user_id = np.arange(1, users_v2.shape[0]+1)\n new_user = pd.Series(new_user_id).rename('new_id')\n users_v2 = users_v2.assign(new_id=new_user.values)\n\n for i in range(items.shape[0]+1):\n new_id = items_v2.loc[items_v2['movie_id'] == i]['new_id']\n if (len(new_id) != 0):\n new = new_id.values\n new_movie_ids = ratings_v2['movie_id'].mask(ratings_v2['movie_id']==i, new)\n ratings_v2 = ratings_v2.assign(movie_id = new_movie_ids)\n\n \n for u in range(users.shape[0]+1):\n new_id = users_v2.loc[users_v2['user_id'] == u]['new_id']\n if (len(new_id) != 0):\n new = new_id.values\n new_user_ids = ratings_v2['user_id'].mask(ratings_v2['user_id']==u, new)\n ratings_v2 = ratings_v2.assign(user_id =new_user_ids)\n\n\n train, test = train_test_split(ratings_v2, test_size=0.2)\n items_v2 = items_v2.drop(['movie_id'], axis=1)\n users_v2 = users_v2.drop(['user_id'], axis=1)\n items_v2.rename(columns = {'new_id':'movie_id'}, inplace = True)\n users_v2.rename(columns = {'new_id':'user_id'}, inplace = True)\n\n return items_v2, users_v2, ratings_v2, train, test", "def test_counts_with_reassignment(self):\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1)\n\t\tnew_user = User.objects.create_user(username=\"test2\", password=\"\", email=\"user@example.com\")\n\t\tdraft = ReviewRequestDraft.create(self.review_request)\n\t\tdraft.owner = new_user\n\t\tdraft.target_people = [draft.owner]\n\t\tdraft.save()\n\t\tself.review_request.publish(self.user)\n\t\tself._check_counters(total_outgoing=0, pending_outgoing=0, starred_public=1)\n\t\tsite_profile = new_user.get_site_profile(self.review_request.local_site)\n\t\tself._check_counters_on_profile(site_profile, total_outgoing=1, pending_outgoing=1, direct_incoming=1, total_incoming=1)", "def years_movies_released():\n reader = initialize_reader()\n years_list = [row[23] for row in reader]\n years_dicts = [{\"year\": i, \"movies_released\": years_list.count(i)} for i in years_list]\n new_list = sorted(years_dicts, key=lambda i: i['movies_released'])\n year_less_movies = new_list[:1]\n print(f\"The year {year_less_movies[0].get('year')} had less movies released with {year_less_movies[0].get('movies_released')}\")\n new_list = sorted(years_dicts, key=lambda i: i['movies_released'], reverse=True)\n year_more_movies = new_list[:1]\n print(f\"The year {year_more_movies[0].get('year')} had more movies released with {year_more_movies[0].get('movies_released')}\")", "def scrape_user_reviews(movies):\n user_reviews = []\n for movie in movies:\n review_count = 0\n review_movie_rank = movie[1]\n review_movie = movie[2]\n review_url = movie[6]\n # form the proper url\n review_url = f\"https://www.imdb.com/{review_url}reviews?sort=reviewVolume&dir=desc&ratingFilter=0\"\n # sleep for random time to avoid IP Block\n # sleep(randint(1, 5))\n response = requests.get(review_url).text\n soup = BeautifulSoup(response, 'lxml')\n\n for review_container in soup.find_all('div', class_='imdb-user-review'):\n review_meta = review_container.find('div', 
class_='display-name-date')\n review_title = review_container.a.text.strip('\\n')\n review_date = review_container.find('span', class_='review-date').text\n reviewer_rating = review_container.find('div', class_='ipl-ratings-bar')\n if reviewer_rating == None:\n reviewer_rating = ''\n else:\n reviewer_rating = reviewer_rating.text.strip('\\n')\n reviewer = review_meta.a.text\n review_content = review_container.find('div', class_='content').div.text\n review = (\n review_count,\n review_movie,\n review_movie_rank,\n review_title,\n reviewer_rating,\n reviewer,\n review_date,\n review_content\n )\n review_count += 1\n print(review_movie, review_count)\n user_reviews.append(review)\n return user_reviews", "def pred_movies(movie_list):\n # Store the id of users\n id_store=[]\n # For each movie selected by a user of the app,\n # predict a corresponding user within the dataset with the highest rating\n for i in movie_list:\n predictions = prediction_item(item_id = i)\n predictions.sort(key=lambda x: x.est, reverse=True)\n # Take the top 10 user id's from each movie with highest rankings\n for pred in predictions[:10]:\n id_store.append(pred.uid)\n # Return a list of user id's\n return id_store" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recount relations for movie | person
def recount_object_relations(content_type_id, instance_id):
    model = ContentType.objects.get_for_id(content_type_id).model_class()
    try:
        instance = model.objects.get(pk=instance_id)
    except model.DoesNotExist:
        return

    relations_counts = {}
    for code in instance.codes:
        relations_counts[code] = model.objects.filter(object_id=instance.object.id, **{code: True}).count()

    model.count_model.objects.update_or_create(object_id=instance.object.id, defaults=relations_counts)
[ "def recount_familiar_objects(content_type_id, user_id):\n model = ContentType.objects.get_for_id(content_type_id).model_class()\n user = User.objects.get(pk=user_id)\n\n count = UserRelationCount.objects.get_or_create(object=user)[0]\n\n if model == MovieRelation:\n count.movies = user.familiar_movies.count()\n elif model == PersonRelation:\n count.persons = user.familiar_persons.count()\n count.save()", "def n_relations(self):\n return len(self.relations)", "def get_relation_count():\n if DataHelper.df_relation2id is None:\n return 0\n return len(DataHelper.df_relation2id)", "def measure_relations(self):\n self.relation_handler.measure_relations(self.edge_dict)", "def _increment_reviewer_counts(self):\n from reviewboard.accounts.models import LocalSiteProfile\n\n groups = list(self.target_groups.values_list('pk', flat=True))\n people = list(self.target_people.values_list('pk', flat=True))\n\n Group.incoming_request_count.increment(self.target_groups.all())\n LocalSiteProfile.direct_incoming_request_count.increment(\n LocalSiteProfile.objects.filter(user__in=people,\n local_site=self.local_site))\n LocalSiteProfile.total_incoming_request_count.increment(\n LocalSiteProfile.objects.filter(\n Q(local_site=self.local_site) &\n Q(Q(user__review_groups__in=groups) |\n Q(user__in=people))))\n LocalSiteProfile.starred_public_request_count.increment(\n LocalSiteProfile.objects.filter(\n profile__starred_review_requests=self,\n local_site=self.local_site))", "def _count_movies(request):\n user_ratings_train = _process_data[\"user_ratings_train\"]\n movie_counts = {} # {movie id: count}\n\n # go through \"user_ratings_train\" to build up \"movie_counts\"\n for user_id, movie_ratings in user_ratings_train:\n for movie_id, rating in movie_ratings:\n if movie_id not in movie_counts:\n movie_counts[movie_id] = 0\n\n movie_counts[movie_id] += 1\n\n _process_data[\"movie_counts\"] = movie_counts", "def update_count():\n methods = GeneFamilyMethod.query.all()\n\n for m in methods:\n m.family_count = m.families.count()\n\n try:\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n print(e)", "def get_number_of_movies(self) -> int:\r\n raise NotImplementedError", "def test_relation_count_dataframe(self):\n for labels, merge_subsets in itertools.product((False, True), repeat=2):\n _test_count_dataframe(\n dataset=self.dataset,\n df=dataset_analysis.get_relation_count_df(\n dataset=self.dataset,\n add_labels=labels,\n merge_subsets=merge_subsets,\n ),\n labels=labels,\n merge_subsets=merge_subsets,\n )", "def get_class_count(self):\n for rec in self:\n count = self.env['assign.class'].search_count(\n [('professor_id', '=', rec.id)])\n rec.extra_class_count = count", "def verse_count(self):\n count = 0\n for lection in self.lections_where_active():\n count += lection.verses.count() # This should be done with an aggregation function in django\n return count", "def movie_count(self):\r\n total_movie_count = 0\r\n for line in self.movies:\r\n total_movie_count += 1\r\n return total_movie_count", "def _decrement_reviewer_counts(self):\n from reviewboard.accounts.models import LocalSiteProfile\n\n groups = list(self.target_groups.values_list('pk', flat=True))\n people = list(self.target_people.values_list('pk', flat=True))\n\n Group.incoming_request_count.decrement(self.target_groups.all())\n LocalSiteProfile.direct_incoming_request_count.decrement(\n LocalSiteProfile.objects.filter(\n user__in=people,\n local_site=self.local_site))\n LocalSiteProfile.total_incoming_request_count.decrement(\n 
LocalSiteProfile.objects.filter(\n Q(local_site=self.local_site) &\n Q(Q(user__review_groups__in=groups) |\n Q(user__in=people))))\n LocalSiteProfile.starred_public_request_count.decrement(\n LocalSiteProfile.objects.filter(\n profile__starred_review_requests=self,\n local_site=self.local_site))", "def get_relation_count_query(self, query, parent):\n query = super(MorphOneOrMany, self).get_relation_count_query(query, parent)\n\n return query.where(self._morph_type, self._morph_class)", "def update_count(sender, instance, signal, *args, **kwargs):\n\n try:\n instance.lesson.update_num_enrolled()\n except:\n print(\"error: can't update lesson num_enrolled\")", "def test_like_objects_change_like_count(self):\n mp = MP.objects.create(\n name=\"Julius Caesar\",\n constituency=CONSTITUENCIES.constants[0],\n party=PARTIES.constants[0]\n )\n speech = Speech.objects.create(\n mp=mp,\n timestamp=timezone.now(),\n text=\"Make dancing Tuesdays mandatory\",\n )\n user = User.objects.create(mp=mp, constituency=mp.constituency)\n self.assertEqual(speech.like_count, 0)\n like = Like.objects.create(user=user, speech=speech)\n speech = Speech.objects.get(pk=speech.pk) # reload\n self.assertEqual(speech.like_count, 1)\n # Now re-save the Like and make sure it doesn't increment again\n like.save()\n speech = Speech.objects.get(pk=speech.pk) # reload\n self.assertEqual(speech.like_count, 1)\n # Now delete the Like. The count should reset to 0.\n like.delete()\n speech = Speech.objects.get(pk=speech.pk) # reload\n self.assertEqual(speech.like_count, 0)", "def docids_count():", "def sync_counts(self):\n self.votes = self.question.votes\n self.reply_count = Post.filter_by(topic=self).count() - 1", "def test_counts_with_reassignment(self):\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1)\n\t\tnew_user = User.objects.create_user(username=\"test2\", password=\"\", email=\"user@example.com\")\n\t\tdraft = ReviewRequestDraft.create(self.review_request)\n\t\tdraft.owner = new_user\n\t\tdraft.target_people = [draft.owner]\n\t\tdraft.save()\n\t\tself.review_request.publish(self.user)\n\t\tself._check_counters(total_outgoing=0, pending_outgoing=0, starred_public=1)\n\t\tsite_profile = new_user.get_site_profile(self.review_request.local_site)\n\t\tself._check_counters_on_profile(site_profile, total_outgoing=1, pending_outgoing=1, direct_incoming=1, total_incoming=1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
nn.MaxPool2d(kernel_size, stride=None, padding=0, dilation=1)

NOTABLE PARAMS
1. 'in_channels' == out_channels of the preceding conv layer
2. 'stride' -> if None, defaults to kernel_size
def maxpool(self,
            kernel_size: Union[Tuple[int], int],
            stride: Union[Tuple[int], int] = None,
            padding: Union[Tuple[int], int] = 0,
            dilation: Union[Tuple[int], int] = 1) -> Tuple[int]:
    if not stride:
        stride = deepcopy(kernel_size)

    self.__calculate_output(kernel_size=kernel_size, stride=stride,
                            padding=padding, dilation=dilation)

    return self.input_channels, self.height, self.width
[ "def maxpool2d_out_dim(in_dim, kernel_size, padding=1, stride=1, dilation=1):\n out_dim = ((in_dim + 2*padding - dilation*(kernel_size-1) - 1)/stride) + 1\n return out_dim\n\n #TODO make a util function to calculate the output size of a layer given a input dim\n #ie get the input size of a linear layer by giving input h or w", "def max_pool_2d(x, size=(2, 2), stride=(2, 2), name='max_pooling',padding='VALID'):\n size_x, size_y = size\n stride_x, stride_y = stride\n return tf.nn.max_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding,\n name=name)", "async def infer_max_pool2d_grad(\n self,\n engine,\n input: lib.AbstractArray,\n kernel_size: lib.u64tup_typecheck,\n stride: lib.u64tup_typecheck,\n padding: lib.u64tup_typecheck,\n dilation: lib.u64tup_typecheck,\n ceil_mode: xtype.Bool,\n dout: lib.AbstractArray,\n):\n return input", "def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):\n filter_size = [conv_ksize[0], conv_ksize[1], x_tensor.get_shape().as_list()[3], conv_num_outputs]\n weight = tf.Variable(tf.truncated_normal(filter_size, stddev = 0.01))\n conv = tf.nn.conv2d(x_tensor, weight, [1, conv_strides[0], conv_strides[1], 1], padding = \"SAME\")\n \n bias = tf.Variable(tf.zeros([conv_num_outputs]))\n \n conv = tf.nn.bias_add(conv, bias)\n conv = tf.nn.relu(conv)\n \n conv = tf.nn.max_pool(conv, [1, pool_ksize[0], pool_ksize[1], 1], [1, pool_strides[0], pool_strides[1], 1], padding = \"SAME\")\n \n return conv", "def _get_2d_pooling_layer(\n num_rows_in_window, num_columns_in_window, num_rows_per_stride,\n num_columns_per_stride, do_max_pooling=True):\n\n if do_max_pooling:\n return keras.layers.MaxPooling2D(\n pool_size=(num_rows_in_window, num_columns_in_window),\n strides=(num_rows_per_stride, num_columns_per_stride),\n padding='valid'\n )\n\n return keras.layers.AveragePooling2D(\n pool_size=(num_rows_in_window, num_columns_in_window),\n strides=(num_rows_per_stride, num_columns_per_stride),\n padding='valid'\n )", "def pool(images, kernel_shape, stride, mode='max'):\n # getting information\n m, ih, iw, ic = images.shape\n kh, kw = kernel_shape\n sh, sw = stride\n\n # getting shape of convolutional matrix\n new_h = int(((ih - kh) / sh) + 1)\n new_w = int(((iw - kw) / sw) + 1)\n conv = np.zeros((m, new_h, new_w, ic))\n\n for i in range(new_h):\n for j in range(new_w):\n part = images[:, (i * sh): (i * sh) + kh,\n (j * sw): (j * sw) + kw]\n # here we get the new matrix of matrices\n if mode == 'max':\n result = np.max(part, axis=1)\n result = np.max(result, axis=1)\n if mode == 'avg':\n result = np.mean(part, axis=1)\n result = np.mean(result, axis=1)\n conv[:, i, j] = result\n return conv", "def check_output_dim_with_ksize_stride(padding, input_gard_shape, y_shape,\n ksize, strides, dilation, ceil_mode):\n util.check_tensor_shape_size(ksize)\n util.check_tensor_shape_size(strides)\n if len(ksize) < ATTR_SHAPE_MIN or len(strides) < ATTR_SHAPE_MIN:\n raise RuntimeError(\n \"The shape length of ksize or strides must be more than 4\")\n if ksize[0] != 1 or ksize[3] != 1:\n raise RuntimeError(\n \"MaxPoolGradWithArgmax only supports pooling across width/height,\"\n \"and other ksize dimension should be one\")\n if strides[0] != 1 or strides[3] != 1:\n raise RuntimeError(\n \"MaxPoolGradWithArgmax only supports pooling across width/height,\"\n \"and other strides dimension should be one\")\n if ksize[1] * ksize[2] > 255:\n raise RuntimeError(\n \"invalid window params, window_h*window_w should be 
<=255\")\n\n input_height = y_shape[2]\n input_width = y_shape[3]\n input_batch = y_shape[0]\n xc1 = y_shape[1]\n xc0 = y_shape[4]\n output_height = input_gard_shape[2]\n output_width = input_gard_shape[3]\n windowh = ksize[1]\n windoww = ksize[2]\n dyn = input_gard_shape[0]\n dyc1 = input_gard_shape[1]\n dyc0 = input_gard_shape[4]\n pad_h = padding[1]\n pad_w = padding[2]\n stride_h = strides[1]\n stride_w = strides[2]\n dilation_h = dilation[1]\n dilation_w = dilation[2]\n\n dyh = _pooling_output_shape(input_height, windowh, pad_h, stride_h,\n dilation_h, ceil_mode)\n dyw = _pooling_output_shape(input_width, windoww, pad_w, stride_w,\n dilation_w, ceil_mode)\n\n if ksize[1] >= input_height or ksize[2] >= input_width:\n raise RuntimeError(\"can not support global pooling now\")\n\n if dyh != output_height or dyw != output_width or \\\n input_batch != dyn or xc1 != dyc1 or xc0 != dyc0:\n raise RuntimeError(\"dimentions of dx dy \\\n padMode window stride is wrong,please check!\")", "def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):", "def _modify_conv_stride_dilation(\n conv: nn.Conv2d,\n stride: Tuple[int, int] = (1, 1),\n padding: int = None,\n) -> None:\n conv.stride = stride\n\n if padding is not None:\n conv.padding = padding", "def filter2d(\n x: torch.Tensor,\n kernel: torch.Tensor,\n padding: Union[int, Tuple[int, int]] = 0,\n) -> torch.Tensor:\n\n return F.conv2d(x, kernel, padding=padding, groups=x.size(1))", "def _avgpool2d_im2col_reduce(\n x: AbstractTensor,\n pool_size: Tuple[int, int],\n strides: Tuple[int, int],\n padding: str,\n) -> AbstractTensor:\n batch, channels, height, width = x.shape\n pool_height, pool_width = pool_size\n\n if padding == \"SAME\":\n out_height = ceil(int(height) / strides[0])\n out_width = ceil(int(width) / strides[1])\n else:\n out_height = ceil((int(height) - pool_size[0] + 1) / strides[0])\n out_width = ceil((int(width) - pool_size[1] + 1) / strides[1])\n\n x_split = x.reshape((batch * channels, 1, height, width))\n x_cols = x_split.im2col(pool_height, pool_width, strides, padding)\n x_cols_sum = x_cols.reduce_sum(axis=0)\n return x_cols_sum.reshape([out_height, out_width, batch, channels]).transpose(\n [2, 3, 0, 1]\n )", "def maxpool2d(X, k=2):\n\treturn tf.nn.max_pool(X, ksize=[1,k,k,1], strides=[1,k,k,1],\n\t\t\t\t\t\t padding='VALID')", "def _get_2d_conv_layer(\n num_rows_in_filter, num_columns_in_filter, num_rows_per_stride,\n num_columns_per_stride, num_filters, use_edge_padding=True,\n weight_regularizer=None):\n\n return keras.layers.Conv2D(\n filters=num_filters,\n kernel_size=(num_rows_in_filter, num_columns_in_filter),\n strides=(num_rows_per_stride, num_columns_per_stride),\n padding='same' if use_edge_padding else 'valid',\n dilation_rate=(1, 1), activation=None, use_bias=True,\n kernel_initializer=KERNEL_INITIALIZER_NAME,\n bias_initializer=BIAS_INITIALIZER_NAME,\n kernel_regularizer=weight_regularizer,\n bias_regularizer=weight_regularizer\n )", "def nasnet_maxpool():\n return nn.MaxPool2D(\n pool_size=3,\n strides=2,\n padding=1)", "def convolve_grayscale_padding(images, kernel, padding):\n m, image_h, image_w = images.shape\n kernel_h, kernel_w = kernel.shape\n\n padding_h, padding_w = padding\n output_h = image_h + (2 * padding_h) - kernel_h + 1\n output_w = image_w + (2 * padding_w) - kernel_w + 1\n\n conv_output = np.zeros((m, output_h, output_w))\n\n img_m = np.arange(0, m)\n images = np.pad(\n images,\n [(0, 0), (padding_h, padding_h), (padding_w, padding_w)],\n mode='constant',\n 
constant_values=0)\n\n for i in range(output_h):\n for j in range(output_w):\n multiply = images[img_m, i:kernel_h+i, j:kernel_w+j]\n conv_output[img_m, i, j] = np.sum(\n np.multiply(multiply, kernel), axis=(1, 2))\n return conv_output", "def DarknetConv2D(*args, **kwargs):\n darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}\n darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)", "def pool_layer(self, name, method, pool, pad, stride, image_size):\n \n # FIX: ignore padding [0 1 0 1]\n\n if method == 'max':\n layer = MaxPooling(name=name, pooling_size=pool, step=stride, input_dim=image_size)\n else:\n raise Exception(\"Unsupported pooling method: %s\" % method)\n\n return (layer, layer.get_dim(\"output\"))", "def conv1d(x, w, stride=1, b=None, padding=\"valid\"):\n assert type(stride) is int, \"Integer strides only!\"\n\n # Getting some layer variables for use so we don't have to keep getting them\n n_in = x.shape[1]\n x_in = np.copy(x) # We are going to potentially pad this, so should copy it\n nfilters = w.shape[-1]\n k = w.shape[0]\n\n if padding == \"same\":\n pad = max(k - 1, 0)\n l_p = pad // 2\n r_p = pad - l_p\n x_in = np.pad(x_in, ((0, 0), (l_p, r_p), (0, 0)), \"constant\")\n n_out = ceil(n_in / stride)\n else:\n n_out = int((n_in - k) / stride + 1)\n\n i = 0\n j = 0\n # This array is size\n # (batch size, number of convolved datapoints, number of output filters)\n result = np.zeros((x.shape[0], n_out, nfilters))\n # Loop over strides keeping track of j as the index in the\n # result array.\n while i < (x_in.shape[1] - k + 1):\n # Collapse the last two dimensions, that is collapse\n # (batch size, kernel size, input dimension)\n # to\n # (batch size, kernel size * input dimension)\n x1 = np.reshape(x_in[:, i:i+k, :], (x_in.shape[0], -1))\n # Collapse the weights array from\n # (kernel size, input dimension, output dimension)\n # to\n # (kernel size * input dimension, output dimension)\n w1 = w.reshape((-1, nfilters))\n # Dot product for this stage of the convolution\n # Output is\n # (batch_size, output dimension)\n y = np.dot(x1, w1)\n\n # Store the output for returning\n result[:, j] = y\n i += stride\n j += 1\n\n # Make the bias vector if one wasn't passed\n if b is None:\n b = np.zeros(nfilters)\n\n return result + b # Adding bias", "def convolution_pooling(prev_layer, n_filters, hype_space):\n current_layer = tensorflow.keras.layers.Conv2D(\n filters=n_filters, kernel_size=(3, 3), strides=(2, 2),\n padding='same', activation='linear',\n kernel_regularizer=tensorflow.keras.regularizers.l2(\n STARTING_L2_REG * hype_space['l2_weight_reg_mult'])\n )(prev_layer)\n\n if hype_space['use_BN']:\n current_layer = bn(current_layer)\n\n return current_layer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the form is valid, store the object and return success.
def form_valid(self, form):
    # stores the object
    self.object = form.save()
    # send the json response
    return self.json_response({'success': True})
[ "def form_valid(self, form):\n form.save()\n return redirect(self.get_success_url())", "def form_valid(self, form):\n self.object = form.save(\n author=ReviewAuthor.objects.get(user=self.request.user),\n book=Book.objects.get(id=self.kwargs['pk']))\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form, other_form):\n\n self.object = form.save()\n\n ## Set pointer to master record and save the other object\n self.other_object = other_form.save(commit=False)\n self.other_object.pk = self.object.pk\n self.other_object.save()\n\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form, ingredient_form, instruction_form):\n\n self.object = form.save()\n ingredient_form.instance = self.object\n ingredient_form.save()\n instruction_form.instance = self.object\n instruction_form.save()\n return HttpResponseRedirect(\n reverse('mealplanner:recipe-detail', kwargs={'pk': self.object.pk})\n )", "def form_valid(self, form, ingredient_form, instruction_form):\n\n self.object = form.save(commit=False)\n self.object.author = self.request.user\n self.object.save()\n ingredient_form.instance = self.object\n ingredient_form.save()\n instruction_form.instance = self.object\n instruction_form.save()\n return HttpResponseRedirect(\n reverse('mealplanner:recipe-detail', kwargs={'pk': self.object.pk})\n )", "def form_valid(self, form):\r\n raise NotImplementedError(\"You must override form_valid.\")", "def validate(self):\r\n self.is_valid = True", "def form_valid(self, form):\n user = form.save()\n login(self.request, user)\n return HttpResponseRedirect(self.get_success_url())", "def handle_form(self, form):\n return form.handle(self.request, form.cleaned_data, \n application=self.object)", "def validate_payment_form(self):\n form = self.payment_form_cls(self.request.POST) \n if form.is_valid():\n success = form.process(self.request, self.item)\n if success:\n payment_was_successful.send(sender=self.item)\n return HttpResponseRedirect(self.success_url)\n else:\n self.context['errors'] = self.errors['processing']\n\n self.context[self.form_context_name] = form\n self.context.setdefault(\"errors\", self.errors['form'])\n return render_to_response(self.payment_template, self.context, RequestContext(self.request))", "def form_valid(self, form) -> HttpResponseRedirect:\n review = Review(title=form.cleaned_data['title'],\n text=form.cleaned_data['text'],\n rating=form.cleaned_data['rating'],\n book_id=self.kwargs.get('pk'),\n creator_id=self.request.user.pk)\n review.save()\n\n messages.success(self.request, 'Review has been added')\n return HttpResponseRedirect(reverse('review-detail', kwargs={'pk': review.pk}))", "def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.photographer = UserProfile.objects.get(user=self.request.user)\n self.object.save()\n form.save_m2m()\n return HttpResponseRedirect(self.get_success_url())", "def post(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n return HttpResponseForbidden()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def form_valid(self, form):\n # The JWT message is validated; now check the message's contents.\n if (\"username\" not in form.cleaned_data) or (\n \"password\" not in form.cleaned_data\n ):\n return HttpResponseBadRequest(\n \"username and/or password are missing from the JWT message\"\n )\n\n portal = Portal.objects.get(sso_key=form.cleaned_data[\"iss\"])\n # Verify the 
username/password\n user = django_authenticate(\n username=form.cleaned_data.get(\"username\"),\n password=form.cleaned_data.get(\"password\"),\n )\n if not user:\n logger.info(\n \"Credentials for %s don't match (requested by portal %s)\",\n form.cleaned_data.get(\"username\"),\n portal,\n )\n raise PermissionDenied(\"Login failed\")\n if not user.is_active:\n raise PermissionDenied(\"User is inactive\")\n logger.info(\n \"Credentials for user %s checked succesfully for portal %s\", user, portal\n )\n user_data = construct_user_data(user=user)\n return HttpResponse(\n json.dumps({\"user\": user_data}), content_type=\"application/json\"\n )", "def after_valid_form_submission(self):", "def _TryToSaveForm(save_form):\n errors = save_form.errors\n if not errors:\n try:\n ob = save_form.save(commit=False)\n except ValueError, err:\n errors['__all__'] = unicode(err)\n if not errors:\n ob.put()\n return not errors", "def test_success_validation_for_answer(self):\n\n form = VoteForm(self.params, current_user=self.user, obj=self.answer)\n self.assertTrue(form.is_valid())", "def form_valid(self, form):\n member = Membership.objects.create(user=self.request.user)\n form.instance.owner = member\n # save the Org instance to enable post-save actions\n self.object = form.save()\n self.object.members.add(member)\n self.object.users.add(self.request.user)\n member.organization = self.object\n member.save()\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self):\n return HttpResponseRedirect(self.ticket.get_absolute_url())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the form is invalid, return a non-success response along with the errors found.
def form_invalid(self, form):
    # send the json response and errors
    return self.json_response({'success': False, 'errors': form.errors}, status=400)
[ "def form_invalid(self, form):\n print(form.errors)\n return render_to_response(self.get_context_data(form=form))", "def test_failed_form_validation(self):\n\n form = AnswerForm()\n self.assertFalse(form.is_valid())", "def are_fields_invalid(self, request):\n # Sends request to each error function and returns first error it sees\n if not is_valid_email(request):\n self.error = 'Invalid Email'\n error_number = 1\n invalid_option = 'email'\n elif not validate_name(request):\n self.error = 'Invalid Name'\n error_number = 2\n invalid_option = 'name'\n elif (not (is_hidden_field_empty(request) and\n is_valid_token(request)) or\n not (is_valid_fields_to_join(request))):\n self.error = 'Improper Form Submission'\n error_number = 3\n invalid_option = 'name'\n elif self.controller.is_rate_violation():\n self.error = 'Too Many Requests'\n error_number = 4\n invalid_option = 'name'\n elif self.controller.is_duplicate(create_msg(request)):\n self.error = 'Duplicate Request'\n error_number = 5\n invalid_option = 'name'\n elif not is_valid_recaptcha(request):\n self.error = 'Invalid Recaptcha'\n error_number = 6\n invalid_option = 'name'\n else:\n # If nothing above is true, there is no error\n return False\n # There is an error if it got this far\n self.logger.warning('formsender: received %s: %s from %s',\n self.error,\n request.form[invalid_option],\n request.form['email'])\n return error_number", "def form_invalid(self):\n return self.render_to_response(self.get_context_data())", "def test_form_validation_for_errors(self):\n f = AskEmailForm(data={'email': 'wrong@mail'})\n\n errors = list(f.errors.values())[0]\n self.assertFalse(f.is_valid())\n self.assertIn('Enter a valid email address.', errors)\n self.assertIn('User with this email doesn\\'t exist.', errors)", "def form_invalid(self, form):\r\n raise NotImplementedError(\"You must override form_invalid.\")", "def test_failed_validation(self):\n\n form = VoteForm()\n self.assertFalse(form.is_valid())", "def form_invalid(self, form):\n messages.warning(\n self.request,\n 'Please check that required fields are filled out correctly.'\n )\n return super(MessageCreateView, self).form_invalid(form)", "def form_valid(self, form):\n try:\n doctors = self._get_doctor_list(form)\n if doctors:\n template_name = 'available_doctor_list.html'\n context_data = {'doctors': doctors}\n return render(self.request, template_name, context_data)\n except Exception as exp: # noqa\n # Log the exception\n pass\n\n # Return error message\n self.form_invalid(form)", "def form_invalid(self, form):\n if self.request.method == 'POST' and self.request.is_ajax():\n return JsonResponse('Error', safe=False)\n else:\n return super(ArticleUpdate, self).form_invalid(form)", "def non_field_errors(self):\n\n return self.form.non_field_errors() if self.primary_fieldset else ErrorList()", "def validate_payment_form(self):\n form = self.payment_form_cls(self.request.POST) \n if form.is_valid():\n success = form.process(self.request, self.item)\n if success:\n payment_was_successful.send(sender=self.item)\n return HttpResponseRedirect(self.success_url)\n else:\n self.context['errors'] = self.errors['processing']\n\n self.context[self.form_context_name] = form\n self.context.setdefault(\"errors\", self.errors['form'])\n return render_to_response(self.payment_template, self.context, RequestContext(self.request))", "def _check_form_errors(self, expects):\n if not 'expect_form_errors' in expects:\n return\n\n tmpl_data = _get_tmpl_data()\n for form, field, num_errors in 
expects['expect_form_errors']:\n field = getattr(tmpl_data[form], field)\n eq_(num_errors, len(field.errors))", "def test_invalid_form_failure(self):\n payload = {\n 'url': self.google_form_url,\n 'inputs': {\n 'entry.505110784': 'accepts nuber only',\n 'entry.1915963433': '',\n 'entry.948181294': '',\n 'entry.700448681': 'C'\n }\n }\n\n response = self.lg.handle_request(\n method='POST',\n path='/google-forms',\n headers={'Content-Type': 'application/json'},\n body=json.dumps(payload)\n )\n\n assert response['statusCode'] == 422\n assert json.loads(response['body']) == {\n 'errors': 'The given data was invalid.',\n 'validations': {\n 'entry.1915963433': 'This is a required question',\n 'entry.505110784': 'Must be a number',\n 'entry.948181294': 'This is a required question'\n }\n }", "def _TryToSaveForm(save_form):\n errors = save_form.errors\n if not errors:\n try:\n ob = save_form.save(commit=False)\n except ValueError, err:\n errors['__all__'] = unicode(err)\n if not errors:\n ob.put()\n return not errors", "def field_errors(self):\n warnings.warn(\n 'WTForms has form-level validation now, use form.errors instead', DeprecationWarning, 2\n )\n return self.errors", "def flash_form_errors(form):\n for field, errors in form.errors.items():\n for error in errors:\n flash('{0}: {1}'.format(field, error))", "def _check_form_validity(self):\n\n for idsp in self._idsp_input:\n if not idsp.form_is_valid():\n self._invalid_input_eh()\n return\n\n self._valid_input_eh()", "def flash_form_errors(form):\n for field, errors in form.errors.items():\n for error in errors:\n flash(\n \"%s: %s\" % (getattr(form, field).label.text, error),\n \"alert-danger\",\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform a batch from the dataset for the text/label pipelines. Creates lists of labels, text tokens, and offsets.
def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for (_text, _label) in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
    text_list = torch.cat(text_list)
    return label_list.to(device), text_list.to(device), offsets.to(device)
[ "def process_input(text):\n global tokenizer\n\n inputs = tokenizer(text, return_tensors=\"pt\")\n labels = torch.tensor([1]).unsqueeze(0)\n\n return inputs, labels", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def reshape_data(labels_in_data, sentences_in_data, word_as_ex=True, sentence_wise=False, num_steps=0):\n\n _sentences_in_data = []\n _labels_in_data = []\n _weights_in_data = []\n if word_as_ex and sentence_wise == False:\n for sent in sentences_in_data:\n for w_idx in sent: _sentences_in_data.append( w_idx )\n\n for label in labels_in_data:\n for l_idx in label: _labels_in_data.append( l_idx )\n\n # _sentences_in_data : a list of word index\n # _labels_in_data : a list of label index\n return _labels_in_data, _sentences_in_data\n\n if sentence_wise:\n for s_idx, sent in enumerate(sentences_in_data):\n _sent = [ w_idx for w_idx in sent ] \n \n # padding to right side\n num_pad = num_steps - len(_sent)\n for i in range(num_pad): _sent.append( 0 ) # 0 = PADDING ID\n _sentences_in_data.append( _sent )\n\n for label in labels_in_data:\n _label = [ l_idx for l_idx in label ] \n _weight = [ 1.0 for i in range( len(label) ) ]\n\n # padding to right side\n num_pad = num_steps - len(_label)\n for i in range(num_pad): _label.append( 0 ) # 0 = PADDING ID\n _labels_in_data.append( _label )\n\n # padding label should be weighted as 0.0\n #for i in range(num_pad): _weight.append( 0.0 ) # --> if you want to ignore pad label to calculate loss. 0.0 weight for pad \n for i in range(num_pad): _weight.append( 1.0 ) # --> for this tutorial, just train pad as well as other symbols to get high precision since this scripts use in_top_k api to calculate precision.\n _weights_in_data.append( _weight )\n\n # _sentences_in_data : a list of list - word index shape = num_examples x num_steps\n # _labels_in_data : a list of list - label index shape = num_examples x num_steps\n # _weights_in_data : a list of list - weight shape = num_examples x num_steps\n return _labels_in_data, _sentences_in_data, _weights_in_data", "def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):\n tensor_datasets = []\n for dataset in encoded_datasets:\n n_batch = len(dataset[0])\n\n input_ids = np.zeros((n_batch, 1, input_len), dtype=np.int64)\n mc_token_ids = np.zeros((n_batch, 1), dtype=np.int64)\n lm_labels = np.full((n_batch, 1, input_len), fill_value=-1, dtype=np.int64)\n mc_labels = np.zeros((n_batch,), dtype=np.int64)\n \n for i, (story, mc_label) in enumerate(zip(*dataset)):\n with_cont1 = [start_token] + story[:cap_length] + [clf_token]\n input_ids[i, 0, :len(with_cont1)] = with_cont1\n \n mc_token_ids[i, 0] = len(with_cont1) - 1\n\n lm_labels[i, 0, :len(with_cont1)-1] = with_cont1[1:]\n\n mc_labels[i] = mc_label\n all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)\n tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))\n return tensor_datasets", "def _preprocess_dataset(src_data, tokenizer):\n dataset = []\n max_length = 0\n for (tag, sent) in src_data:\n token_ids = tokenizer.encode(sent)\n dataset.append({'src': token_ids, 'tgt': tag})\n if max_length < len(token_ids):\n max_length = len(token_ids)\n\n return dataset, max_length", "def transform(self, X, y=None, entity_labels=None):\n log.info(\"Generating features for {} documents...\".format(len(X)))\n tokens_per_doc, labels_per_doc = 
\\\n transform_annotated_documents_to_bio_format(X, entity_labels=entity_labels)\n tokens_flat = [token for tokens in tokens_per_doc for token in tokens]\n labels_flat = [label for labels in labels_per_doc for label in labels]\n pos_tags_flat = [pos_tag for tokens in tokens_per_doc for pos_tag in tokens_to_pos_tags(tokens)]\n\n features_flat = [self._word_to_features(token) for token in tokens_flat]\n for word_features, pos_tag in zip(features_flat, pos_tags_flat):\n word_features.append(pos_tag)\n\n if not self.encoders:\n # first time run\n for idx in range(len(features_flat[0])):\n if isinstance(features_flat[0][idx], str):\n self.encoders[idx] = LabelEncoder()\n column_vector = [features_flat[i][idx] for i in range(len(features_flat))]\n column_vector.append(UNKNOWN_WORD)\n self.encoders[idx].fit(column_vector)\n\n for idx in range(len(features_flat[0])):\n if idx in self.encoders:\n column_vector = [features_flat[i][idx] for i in range(len(features_flat))]\n self._process_unknown_values(column_vector, self.encoders[idx].classes_.tolist(), UNKNOWN_WORD)\n column_vector = self.encoders[idx].transform(column_vector).tolist()\n for i in range(len(features_flat)):\n features_flat[i][idx] = column_vector[i]\n\n return features_flat, tokens_flat, labels_flat", "def preprocess_dataset(dataset):\n return [preprocess(document) for document in dataset]", "def process_batch(self, ids):\n datas = self.db.get_row_by_ids(ids)\n predicted_results = []\n for unlabeled_data in datas:\n predict = self.interpreter.parse(unlabeled_data[\"text\"])\n if predict:\n unlabeled_data.update(predict)\n predicted_results.append(unlabeled_data)\n return predicted_results", "def make_labeled_data_for_predictor(graph, raw_point, tokenizer):\n M = len(graph.tokens)\n # print(f\"making labels for point {raw_point[0]}\") #CLEANUP\n # print(f\"query of that point: {raw_point[2]}\") #CLEANUP\n # print(f\"length of graph's tokens (=M): {M}\") #CLEANUP\n # print(f\"raw_point context:\\n{raw_point[3]}\\n\\n\") #CLEANUP\n\n sup_labels = torch.zeros(M, dtype=torch.long) # CrossEntropyLoss needs dtype=torch.long\n start_label = torch.zeros(1, dtype=torch.long)\n end_label = torch.zeros(1, dtype=torch.long)\n type_labels = torch.zeros(1, dtype=torch.long)\n\n answer = raw_point[4].lower()\n\n # get answer type\n if answer == \"yes\":\n type_labels[0] = 0\n elif answer == \"no\":\n type_labels[0] = 1\n else:\n type_labels[0] = 2\n\n # if the answer is not \"yes\" or \"no\", its a span\n if type_labels[0] == 2:\n for i, token in enumerate(graph.tokens):\n if answer.startswith(token) and start_label==0:\n start_label[0] = i # take the first start token's index as the label\n if answer.endswith(token):\n end_label[0] = i # take the last end token's index as label\n\n # get supporting facts (paragraphs)\n list_context = [[p[0] + \" \"] + p[1] for p in graph.context] # squeeze header into the paragraph\n num_sentences = sum([len(p) for p in list_context]) # number of sentence, including headers\n\n # use an extra tokenizer again in order to have the correct number of tokens in order to determine position later\n tokenized_sentences = [[tokenizer.tokenize(s) for s in p] for p in list_context] # list[list[list[str]]]\n\n position = 0\n sent_position = 0\n for i, para in enumerate(graph.context):\n if raw_point[1].get(para[0]): # para title in supporting facts\n for j, sent in enumerate(tokenized_sentences[i]):\n if j - 1 in raw_point[1][para[0]]: # the 0th sentence is the paragraph title, j - 1 accounts for that\n sup_labels[ position : 
position + len(sent) ] = 1 # fill with 1's from position to position + len(sent)\n position += len(sent) # update position\n sent_position += 1\n else: # if the paragraph does not have any supporting facts, update our position with the total paragraph length\n position += sum([len(sent) for sent in tokenized_sentences[i]])\n sent_position += len(tokenized_sentences[i])\n\n return sup_labels, start_label, end_label, type_labels # M, M, M, 1", "def predict_tags(model, sess, token_idxs_batch, lengths, idx2tok, idx2tag):\n tag_idxs_batch = model.predict_for_batch(sess, token_idxs_batch, lengths)\n tags_batch, tokens_batch = [], []\n for tag_idxs, token_idxs in zip(tag_idxs_batch, token_idxs_batch):\n tags, tokens = [], []\n for tag_idx, token_idx in zip(tag_idxs, token_idxs):\n tags.append(idx2tag[tag_idx])\n tokens.append(idx2tok[token_idx])\n\n tags_batch.append(tags)\n tokens_batch.append(tokens)\n\n return tags_batch, tokens_batch", "def prepare_batch(batch_data, batch_sent_lens):\n cur_max = max(batch_sent_lens) # get max sequence length for the batch\n batch_data = [sent_tokens[0:cur_max] for sent_tokens, len_ in zip(batch_data, batch_sent_lens)] #list of list\n return batch_data", "def infer_batch(self, batch):\n\t\t\n\t\t# decode \n num_batch_elements = len(batch.images)\n eval_list = [self.decoder]\n feed_dict = {self.input_images : batch.images, self.seq_len : [Model.max_text_len] * num_batch_elements, self.is_train: False}\n eval_res = self.sess.run(eval_list, feed_dict)\n decoded = eval_res[0]\n texts = self.decoder_output_to_text(decoded, num_batch_elements)\n\n return texts", "def data_processing(labels_df, x_train, y_train, label_map):\n subset = str()\n\n if labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 16 or labels_df.shape[0] == 64:\n batch_size = 8 ### Modified for smaller images\n subset = \"train\"\n elif labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8:\n batch_size = 4\n subset = \"valid\"\n elif labels_df.shape[0] == 40669:\n batch_size = 4\n subset = \"test\" \n elif labels_df.shape[0] == 20522:\n batch_size = 2\n subset = \"test-add\" \n else:\n raise ValueError('The dataset format is different than expected')\n\n label_map = label_map\n# images_size = (256, 256)\n images_size = (64, 64)\n\n # Iterate through batches of rows of the dataset\n for i in range(labels_df.shape[0]//batch_size):\n \n temp_labels_df = labels_df.iloc[i*batch_size:((i+1)*batch_size) , :]\n \n # Iterate through the samples batch and create x and y for training\n for f, tags in tqdm(temp_labels_df.values, miniters=100):\n # load a .tif file\n img = io.imread('data/{}-jpg/{}.jpg'.format(subset,f)) ######## Modified for train jpg folder\n img = transform.resize(img, images_size)\n\n### Removed for use of JPEG files:\n# # Add NDVI layer // Removed for usage of JPG files\n# np.seterr(all='warn') # divide by zero, NaN values\n# img_ndvi = np.expand_dims((img[:, :, 3] - img[:, :, 2]) / (img[:, :, 3] + img[:, :, 2]), axis=2) # (NIR - RED) / (NIR + RED)\n# img = np.concatenate((img, img_ndvi), axis=2)\n \n # Create the target array for an image\n targets = np.zeros(17)\n for t in tags.split(' '):\n targets[label_map[t]] = 1 \n\n x_train.append(img)\n y_train.append(targets)\n\n # Format values\n y_train = np.array(y_train, np.uint8)\n x_train = np.array(x_train, np.float16) / 255.\n\n### Removed for use of JPEG files: \n# x_train = np.array(x_train, np.float16) / 65536.\n#### x_train -= 0.5\n#### x_train *= 2 \n\n\n # Save subsets in 
npz files\n np.save('data/{}-npy/npdatasetX{}'.format(subset, i), x_train)\n x_train = []\n np.save('data/{}-npy/npdatasetY{}'.format(subset, i), y_train)\n y_train = []\n #print \"{} data saved\".format(subset)", "def do_tagging(text, config, model):\n # load data and tokenization\n source = []\n token_pos_list = []\n # print('Tokenize sentences')\n for sent in sent_tokenizer(text):\n tokens = [token.text for token in sent]\n token_pos = [token.idx for token in sent]\n source.append(tokens)\n token_pos_list.append(token_pos)\n\n dataset = SnippextDataset(source, config['vocab'], config['name'],\n lm=model.lm,\n max_len=64)\n iterator = data.DataLoader(dataset=dataset,\n batch_size=32,\n shuffle=False,\n num_workers=0,\n collate_fn=SnippextDataset.pad)\n\n # prediction\n model.eval()\n Words, Is_heads, Tags, Y, Y_hat = [], [], [], [], []\n with torch.no_grad():\n # print('Tagging')\n for i, batch in enumerate(iterator):\n try:\n words, x, is_heads, tags, mask, y, seqlens, taskname = batch\n taskname = taskname[0]\n _, _, y_hat = model(x, y, task=taskname) # y_hat: (N, T)\n\n Words.extend(words)\n Is_heads.extend(is_heads)\n Tags.extend(tags)\n Y.extend(y.numpy().tolist())\n Y_hat.extend(y_hat.cpu().numpy().tolist())\n except:\n print('error @', batch)\n\n # gets results and save\n results = []\n for words, is_heads, tags, y_hat in zip(Words, Is_heads, Tags, Y_hat):\n y_hat = [hat for head, hat in zip(is_heads, y_hat) if head == 1]\n # remove the first and the last token\n preds = [dataset.idx2tag[hat] for hat in y_hat][1:-1]\n results.append(preds)\n\n return source, token_pos_list, results", "def producte_token_labeling_list(self):\n path_to_token_labeling_file = os.path.join(self.path_to_label_file, \"token_label_out.txt\")\n token_labeling_list = self._get_token_labeling_list(path_to_token_labeling_file)\n path_to_token_labeling_test_results_file = os.path.join(self.path_to_predict_label_file,\n \"token_label_prediction_test_results.txt\")\n predict_token_labeling_list = self._get_predict_token_labeling_list(path_to_token_labeling_test_results_file)\n token_labeling_test_list = []\n clean_predict_token_labeling_list = []\n seqence_length_dont_match_index = 0\n for y_test, y_predict in zip(token_labeling_list, predict_token_labeling_list):\n y_predict = y_predict[1:-1] # y_predict.remove('[CLS]') #y_predict.remove('[SEP]')\n while '[Padding]' in y_predict:\n print(\"X\" * 100)\n y_predict.remove('[Padding]')\n while '[##WordPiece]' in y_predict:\n y_predict.remove('[##WordPiece]')\n while '[##WordPiece]' in y_test:\n y_test.remove('[##WordPiece]')\n if len(y_predict) > len(y_test):\n print(y_predict)\n print(y_test)\n print(\"~*\" * 100)\n seqence_length_dont_match_index += 1\n y_predict = y_predict[0:len(y_test)]\n elif len(y_predict) < len(y_test):\n print(y_predict)\n print(y_test)\n print(\"~\" * 100)\n y_predict = y_predict + [\"O\"] * (len(y_test) - len(y_predict))\n seqence_length_dont_match_index += 1\n assert len(y_predict) == len(y_test)\n # 如果有较多的预测句子与正确句子长度不匹配(> 句子总数的1%),说明不能用上述简单方法处理预测出来的句子\n #assert seqence_length_dont_match_index < int(len(token_labeling_list) * 0.01)\n token_labeling_test_list.extend(y_test)\n clean_predict_token_labeling_list.extend(y_predict)\n if \"[CLS]\" in clean_predict_token_labeling_list:\n print(\"[CLS] doesn't just appear at the beginning of a sentence.\")\n clean_predict_token_labeling_list = [y_p.replace(\"[CLS]\", \"O\") for y_p in clean_predict_token_labeling_list]\n print(\"[CLS]\" * 10 + \"\\n\")\n if \"[SEP]\" in 
clean_predict_token_labeling_list:\n print(\"[SEP] doesn't just appear at the end of a sentence.\")\n clean_predict_token_labeling_list = [y_p.replace(\"[SEP]\", \"O\") for y_p in clean_predict_token_labeling_list]\n print(\"[SEP]\" * 10 + \"\\n\")\n print(\"seqence_length_dont_match numbers\", seqence_length_dont_match_index)\n return token_labeling_test_list, clean_predict_token_labeling_list", "def convertDataToTrainingBatch(premiseIdxMat, timestepsPremise, hypothesisIdxMat,\n timestepsHypothesis, pad, embeddingTable, labels, minibatch):\n if pad == 'right':\n batchPremise = premiseIdxMat[0:timestepsPremise, minibatch, :]\n batchHypothesis = hypothesisIdxMat[0:timestepsHypothesis, minibatch, :]\n else:\n batchPremise = premiseIdxMat[-timestepsPremise:, minibatch, :]\n batchHypothesis = hypothesisIdxMat[-timestepsHypothesis:, minibatch, :]\n\n batchPremiseTensor = embeddingTable.convertIdxMatToIdxTensor(batchPremise)\n batchHypothesisTensor = embeddingTable.convertIdxMatToIdxTensor(batchHypothesis)\n\n batchLabels = labels[minibatch]\n\n return batchPremiseTensor, batchHypothesisTensor, batchLabels", "def _prepare_split(docs, classes):\n texts = []\n labels = []\n for doc in docs:\n text = ' '.join(reuters.words(doc))\n clz = reuters.categories(doc)[0]\n texts.append(text)\n labels.append(classes.index(clz))\n\n return texts, labels", "def format_input(self):\n # to get the weight matrix for the embedding layer\n self.get_weights_matrix()\n\n try:\n shutil.rmtree('./data/inputs/word2vec')\n except:\n pass\n os.mkdir('./data/inputs/word2vec')\n\n self.path_sentences = './data/inputs/sentences.txt'\n self.path_labels = './data/inputs/labels.txt'\n self.path_sentences_output = './data/inputs/word2vec/sentences.npy'\n self.path_labels_output = './data/inputs/word2vec/labels.npy'\n\n with open(self.path_sentences, 'r+') as f:\n lines = f.readlines()\n max_lenght = max([len(line.split()) for line in lines])\n sentences = np.zeros((len(lines), max_lenght)) # size = samples x max lenght of sentences\n i = 0\n nb_unknown = 0\n nb_token = 0\n for line in lines:\n sentence_formated = []\n for word in line.split():\n nb_token += 1\n try:\n sentence_formated.append(self.index_dict[word.decode('utf8')])\n except:\n sentence_formated.append(0)\n nb_unknown += 1\n lenght = len(sentence_formated)\n sentences[i, :lenght] = sentence_formated[:lenght]\n i += 1\n print('there was', nb_unknown, 'unknown tokens out of', nb_token, 'total tokens, which account for', int((float(nb_unknown) / float(nb_token))*100), '% of all tokens')\n\n with open(self.path_labels, 'r+') as f:\n lines = f.readlines()\n lines = map(int, lines)\n lb = LabelBinarizer()\n labels = lb.fit_transform(lines)\n # labels = np.zeros((len(lines), 1))\n # i = 0\n # for line in lines:\n # labels[i] = line\n # i += 1\n\n with open(self.path_sentences_output, 'wb') as f:\n np.save(f, sentences)\n with open(self.path_labels_output, 'wb') as f:\n np.save(f, labels)\n\n print('shape of sentences (nb_sample, max_len):', sentences.shape)\n print('shape of labels (nb_sample):', labels.shape)\n return sentences, labels", "def make_labels(self):\n for lab in self.label_ids: #init label objects\n self.labels[lab] = Label(self.id, lab)\n for sentence in self.sentences: #dump stuff into the label objects\n for i in range(1, len(sentence.labels)):\n lab = sentence.labels[i]\n self.labels[lab].add_sentence(sentence.words[i], sentence.probs[lab], sentence.surprisal[lab])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrap model as DDP.
def init_distributed(self):
    self.model = DDP(self.model, device_ids=[self.device])
[ "def dedp(model):\n return model.module if isinstance(model, torch.nn.DataParallel) else model", "def DistributedFairseqModel(args, model):\n # determine which DDP class to extend\n assert isinstance(model, nn.Module)\n if args.ddp_backend == 'c10d':\n ddp_class = nn.parallel.DistributedDataParallel\n init_kwargs = dict(\n module=model,\n device_ids=[args.device_id],\n output_device=args.device_id,\n broadcast_buffers=False,\n bucket_cap_mb=args.bucket_cap_mb,\n )\n # Maintain backward compatibility\n if 'check_reduction' in inspect.getargspec(ddp_class)[0]:\n init_kwargs['check_reduction'] = True\n if 'find_unused_parameters' in inspect.getargspec(ddp_class)[0]:\n init_kwargs['find_unused_parameters'] = args.find_unused_parameters\n elif args.ddp_backend == 'no_c10d':\n ddp_class = LegacyDistributedDataParallel\n init_kwargs = dict(\n module=model,\n world_size=args.distributed_world_size,\n buffer_size=2**28,\n )\n else:\n raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)\n\n class _DistributedFairseqModel(ddp_class):\n \"\"\"Extend DistributedDataParallel to check for missing\n attributes in the wrapped module.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __getattr__(self, name):\n wrapped_module = super().__getattr__('module')\n if hasattr(wrapped_module, name):\n return getattr(wrapped_module, name)\n return super().__getattr__(name)\n\n return _DistributedFairseqModel(**init_kwargs)", "def _is_ddp_wrapped(model: nn.Module) -> bool:\n parallel_wrappers = nn.DataParallel, nn.parallel.DistributedDataParallel\n\n # Check whether Apex is installed and if it is,\n # add Apex's DistributedDataParallel to list of checked types\n if SETTINGS.apex_required:\n from apex.parallel import DistributedDataParallel as apex_DDP\n\n parallel_wrappers = parallel_wrappers + (apex_DDP,)\n\n if SETTINGS.fairscale_required:\n from fairscale.nn.data_parallel import FullyShardedDataParallel, ShardedDataParallel\n\n parallel_wrappers = parallel_wrappers + (ShardedDataParallel, FullyShardedDataParallel)\n\n if SETTINGS.deepspeed_required:\n from deepspeed import DeepSpeedEngine, PipelineEngine\n\n parallel_wrappers = parallel_wrappers + (DeepSpeedEngine, PipelineEngine)\n\n return isinstance(model, parallel_wrappers)", "def make_model(self, data):\n return self.MODEL(**data)", "def _is_ddp_wrapped(model: nn.Module) -> bool:\n parallel_wrappers = nn.DataParallel, nn.parallel.DistributedDataParallel\n\n # Check whether Apex is installed and if it is,\n # add Apex's DistributedDataParallel to list of checked types\n if SETTINGS.apex_required:\n from apex.parallel import DistributedDataParallel as apex_DDP\n\n parallel_wrappers = parallel_wrappers + (apex_DDP,)\n\n if SETTINGS.fairscale_required:\n from fairscale.nn.data_parallel import (\n FullyShardedDataParallel,\n ShardedDataParallel,\n )\n\n parallel_wrappers = parallel_wrappers + (\n ShardedDataParallel,\n FullyShardedDataParallel,\n )\n\n if SETTINGS.deepspeed_required:\n from deepspeed import DeepSpeedEngine, PipelineEngine\n\n parallel_wrappers = parallel_wrappers + (DeepSpeedEngine, PipelineEngine)\n\n return isinstance(model, parallel_wrappers)", "def _create_motd_model_view(self):\n \n # Only do imports when you need to! 
This makes sure that the import\n # only happens when somebody needs the motd attribute.\n from motd.model.i_motd import IMOTD\n from motd.ui.motd_model_view import MOTDModelView\n \n # ask the application for an IMOTD instance\n motd = self.application.get_service(IMOTD)\n \n motd_model_view = MOTDModelView(model=motd)\n return motd_model_view", "def setupModelInstance(self, geom, dssatexe):\n return super(Model, self).setupModelInstance(geom, \"DSSAT_Ex.exe\")", "def get_dw(model):\n\n resid = model.resid\n return dw(resid)", "def _freeze_model(self):\n self.pdf = self.model(**self.pars).pdf", "def simple_drudge(spark_ctx):\n\n dr = Drudge(spark_ctx)\n\n n = symbols('n')\n r = Range('r', 0, n)\n dumms = symbols('a b c d e f g h')\n dr.set_dumms(r, dumms)\n dr.add_default_resolver(r)\n\n dr.n = n\n dr.r = r\n dr.ds = dumms\n\n return dr", "def ImputedDeepSignatureModel():\n name = 'ImputedDeepSignatureModel'", "def prepare_for_inference(model: TModel) -> TPModel:", "def model_to_dot_source(model) -> str:\n if model.need_sync:\n model.sync_template()\n\n template = model.template\n template_drawer = get_template_drawer(get_drawing_settings())\n\n return to_dot_source(template_drawer, template)", "def unwrap_model(model):\r\n return model.module if isinstance(\r\n model,\r\n (nn.DataParallel, nn.parallel.DistributedDataParallel)) else model", "def _get_inference_model(self):\n inference_model_path = os.path.join(self._task_path, \"static\",\n \"inference\")\n if not os.path.exists(inference_model_path + \".pdiparams\"):\n with dygraph_mode_guard():\n self._construct_model(self.model)\n self._construct_input_spec()\n self._convert_dygraph_to_static()\n\n model_file = inference_model_path + \".pdmodel\"\n params_file = inference_model_path + \".pdiparams\"\n self._config = paddle.inference.Config(model_file, params_file)\n self._prepare_static_mode()", "def model_instance(self) -> any:\n pass", "def _extract_pmodel(self):\n\n class OldParseModel(object):\n def __init__(self):\n self.classlist = {}\n self.modulemethods = []\n\n pmodel = OldParseModel()\n pmodel.classlist = self.classlist\n pmodel.modulemethods = self.modulemethods\n return pmodel", "def build_model(self):\n ...", "def configure_ddp(self):\n self.pre_configure_ddp()\n self._model = DistributedDataParallel(\n LightningDistributedModule(self.model),\n **self._ddp_kwargs,\n )\n self._register_ddp_hooks()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set device for model.
def set_device(self, device):
    self.device = device
    self.model = self.model.to(device)
[ "def device_class(self, value):\n\n self._device_class.set(value)", "def set_device_properties(device_uid, config):\n return runtime.set_device_properties(device_uid, config)", "def _set_cuda_device(device_id, verbose=None):\n import cupy\n\n cupy.cuda.Device(device_id).use()\n logger.info(\"Now using CUDA device {}\".format(device_id))", "def setup_device(\n model: nn.Module, target_devices: List[int]\n) -> Tuple[torch.device, List[int]]:\n available_devices = list(range(torch.cuda.device_count()))\n\n if not available_devices:\n log.warning(\n \"There's no GPU available on this machine. Training will be performed on CPU.\"\n )\n device = torch.device(\"cpu\")\n model = model.to(device)\n return model, device\n\n if not target_devices:\n log.info(\"No GPU selected. Training will be performed on CPU.\")\n device = torch.device(\"cpu\")\n model = model.to(device)\n return model, device\n\n max_target_gpu = max(target_devices)\n max_available_gpu = max(available_devices)\n\n if max_target_gpu > max_available_gpu:\n msg = (\n f\"Configuration requests GPU #{max_target_gpu} but only {max_available_gpu} \"\n \"available. Check the configuration and try again.\"\n )\n log.critical(msg)\n raise Exception(msg)\n\n log.info(f\"Using devices {target_devices} of available devices {available_devices}\")\n device = torch.device(f\"cuda:{target_devices[0]}\")\n if len(target_devices) > 1:\n model = nn.DataParallel(model, device_ids=target_devices)\n else:\n model = model.to(device)\n return model, device", "def hook_device_target(self, x):\n self.device_target = x", "def device_settings(self, device_settings):\n\n self._device_settings = device_settings", "def set_boot_device(self, task, device, persistent=False):\n oneview_info = common.get_oneview_info(task.node)\n\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(\n _(\"Invalid boot device %s specified.\") % device)\n\n LOG.debug(\"Setting boot device to %(device)s for node %(node)s\",\n {\"device\": device, \"node\": task.node.uuid})\n try:\n device_to_oneview = BOOT_DEVICE_MAPPING_TO_OV.get(device)\n self.oneview_client.set_boot_device(oneview_info,\n device_to_oneview,\n onetime=not persistent)\n except oneview_exceptions.OneViewException as oneview_exc:\n msg = (_(\n \"Error setting boot device on OneView. 
Error: %s\")\n % oneview_exc\n )\n raise exception.OneViewError(error=msg)", "def device_id(self, device_id: int):\n\n self._device_id = device_id", "def _set_deploy_boot_device(self, task):\n driver_info = _parse_driver_info(task.node)\n hw = _get_hw_library(driver_info)\n hw.set_boot_device(task)", "def set_device_id(device_id: Optional[Union[int, str]] = None) -> str:\r\n if device_id is not None and device_id != \"cpu\" and torch.cuda.is_available():\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(device_id)\r\n return \"cuda\"\r\n else:\r\n return \"cpu\"", "def hook_device_num(self, x):\n self.device_num = x", "def set_current_devices(self):\n new_devices, _ = self._get_devices()\n self.devices = new_devices", "def device_key(self, value):\n\n self._device_key.set(value)", "def cuda(self, device=None):\n if torch.cuda.is_available():\n self.is_cuda = True\n self.device = device\n self.network_module.cuda(device)\n print(\"Moving \", self.name, \" to GPU\")\n else:\n print(\"CUDA is unavailable\")\n return self", "def update_device(self, device_name, **kwargs):\n device_id = self.get_devices(name=device_name)[0]['id']\n return self.netbox_con.patch('/dcim/devices/', device_id, **kwargs)", "def set_device_id(idx):\n\n import ctypes as ct\n from .util import safe_call as safe_call\n from .library import backend as backend\n\n if (backend.name() != \"opencl\"):\n raise RuntimeError(\"Invalid backend loaded\")\n\n safe_call(backend.get().afcl_set_device_id(idx))\n return", "def __setattr__(self,name,value):\n def isInDicts(name,cls):\n for c in cls.mro()[:-1]:\n if name in c.__dict__:\n return True\n return False\n from inspect import stack\n if name in self.part_dict:\n head = self if self._head==0 else self.head\n TreeNode(self.part_dict[name]+self.head.nid,self.tree,head).record=value\n elif (name.startswith('_')\n or name in self.__dict__\n or isInDicts(name,self.__class__)\n or isinstance(stack()[1][0].f_locals.get('self',None),Device)):\n super(Device,self).__setattr__(name,value)\n else: print(\"\"\"WARNING: your tried to add the attribute or write to the subnode '%s' of '%s'.\nThis is a deprecated action for Device nodes outside of Device methods. You should prefix the attribute with '_'.\nIf you did intend to write to a subnode of the device you should check the proper path of the node: TreeNNF.\n\"\"\"%(name, self.path))", "def update_device_by_id(self, device_id, **kwargs):\n return self.netbox_con.patch('/dcim/devices/', device_id, **kwargs)", "def cuda(self: T, device: Optional[int] = None) -> T:\n return self.to(torch.device(f\"cuda:{device}\" if device is not None else \"cuda\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get outputs for a batch of images and text.
def get_outputs(
    self,
    batch_text: List[str],
    batch_images: List[List[Image.Image]],
    min_generation_length: int,
    max_generation_length: int,
    num_beams: int,
    length_penalty: float,
) -> List[str]:
[ "def get_batch_size_1_output_images(outputs, b):\n b_1_outputs = {}\n for field in standard_fields.get_output_image_fields():\n if field in outputs:\n b_1_outputs[field] = outputs[field][b:b + 1, Ellipsis]\n return b_1_outputs", "def infer_batch(self, batch):\n\t\t\n\t\t# decode \n num_batch_elements = len(batch.images)\n eval_list = [self.decoder]\n feed_dict = {self.input_images : batch.images, self.seq_len : [Model.max_text_len] * num_batch_elements, self.is_train: False}\n eval_res = self.sess.run(eval_list, feed_dict)\n decoded = eval_res[0]\n texts = self.decoder_output_to_text(decoded, num_batch_elements)\n\n return texts", "def tesseract_recog_inference(self, imgs, **kwargs):\n is_batch = True\n if isinstance(imgs, np.ndarray):\n is_batch = False\n imgs = [imgs]\n assert is_type_list(imgs, np.ndarray)\n api = self.get_tesserocr_api()\n\n results = []\n for img in imgs:\n image = Image.fromarray(img)\n api.SetImage(image)\n api.SetRectangle(0, 0, img.shape[1], img.shape[0])\n # Remove beginning and trailing spaces from Tesseract\n text = api.GetUTF8Text().strip()\n conf = api.MeanTextConf() / 100\n results.append({'text': text, 'score': conf})\n\n # close tesserocr api\n api.End()\n\n if not is_batch:\n return results[0]\n else:\n return results", "def process_test_set(fb, textons, k):\n test = None\n labels = []\n for cat in CLASSES:\n label = int(cat.split('T')[1])\n regex = osp.join(TEST_PATH, cat + '*.jpg')\n imgs = glob.glob(regex)\n for img in imgs:\n print(img)\n hist = compute_texton_histogram(img, fb, textons, k)\n hist = hist.reshape(len(hist), 1)\n if test is None:\n test = hist\n else:\n test = np.hstack((test, hist))\n labels.append(label)\n return test, labels", "def extract(cfg, sess, img_path, output_dir):\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img_batches = image_to_batches(img)\n\n batches_out = sess.run('bob_vars_1/bob_eval_out:0',\n feed_dict={'img_in:0': img_batches})\n\n batches_to_file(batches_out, output_dir)", "def generate_images_pred(self, inputs, outputs):\n assert outputs[(\"disp\", 0)].shape[-2:] == (\n self.height, self.width), f'{outputs[(\"disp\", 0)].shape[-2:]} should be {(self.height, self.width)} '\n for scale in self.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(\n disp, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.min_depth, self.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.frame_ids[1:]):\n\n if frame_id == \"s\":\n T = inputs[\"stereo_T\"]\n else:\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\",\n align_corners=True)\n\n if not self.disable_automasking:\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def logImage (self, tag, images, csc=None, h=None, w=None, maxOutputs=3, **kwargs):\n\t\t\n\t\tif isinstance(images, (bytes, bytearray)):\n\t\t\t\"\"\"\n\t\t\t\"Raw\" calling convention: `image` contains an image file, and all\n\t\t\targuments are mandatory. 
Image is logged encoded as-is\n\t\t\t\"\"\"\n\t\t\t\n\t\t\tmetadata, reject, tag = self._commonTagLogic(\"images\", tag=tag+\"/image\", **kwargs)\n\t\t\tif reject: return self\n\t\t\t\n\t\t\tval = TfImage(height = int(h),\n\t\t\t width = int(w),\n\t\t\t colorspace = int(csc),\n\t\t\t imageData = images).asValue(tag, metadata)\n\t\t\twith self._lock:\n\t\t\t\treturn self._stageValue(val)\n\t\telif isinstance(images, (list, np.ndarray)):\n\t\t\t\"\"\"\n\t\t\t\"Numpy\" calling convention: `image` is a numpy ndarray shaped (N,C,H,W).\n\t\t\tConversion is to PNG -z 9. The precise transformation depends on the\n\t\t\tnumber of channels, datatype and content.\n\t\t\t\"\"\"\n\t\t\t\n\t\t\t#\n\t\t\t# Expand dimensionality\n\t\t\t#\n\t\t\tif isinstance(images, np.ndarray) and images.ndim == 3:\n\t\t\t\timages = images[np.newaxis, ...]\n\t\t\t\n\t\t\t#\n\t\t\t# Iterate.\n\t\t\t#\n\t\t\tfor i, image in enumerate(images):\n\t\t\t\t#\n\t\t\t\t# Do not output more than the limit of images.\n\t\t\t\t#\n\t\t\t\tif i >= maxOutputs:\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# Follow TF naming algorithm for image batches.\n\t\t\t\t#\n\t\t\t\tif i == 0 and maxOutputs == 1:\n\t\t\t\t\tmetadata, reject, tag = self._commonTagLogic(\"images\", tag=tag+\"/image\", **kwargs)\n\t\t\t\telse:\n\t\t\t\t\tmetadata, reject, tag = self._commonTagLogic(\"images\", tag=tag+\"/image/\"+str(i), **kwargs)\n\t\t\t\tif reject: continue\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# Follow TF type-conversion algorithm for individual images.\n\t\t\t\t#\n\t\t\t\t# If c == 1: Assume grayscale.\n\t\t\t\t# Elif c == 2: Assume grayscale+alpha.\n\t\t\t\t# Elif c == 3: Assume RGB.\n\t\t\t\t# Elif c == 4: Assume RGBA.\n\t\t\t\t# Else: raise\n\t\t\t\t#\n\t\t\t\tc, h, w = image.shape\n\t\t\t\tif c == 1:\n\t\t\t\t\tcsc = TfColorSpace.GRAYSCALE\n\t\t\t\t\tmode = \"L\"\n\t\t\t\telif c == 2:\n\t\t\t\t\tcsc = TfColorSpace.GRAYSCALE_ALPHA\n\t\t\t\t\tmode = \"LA\"\n\t\t\t\telif c == 3:\n\t\t\t\t\tcsc = TfColorSpace.RGB\n\t\t\t\t\tmode = \"RGB\"\n\t\t\t\telif c == 4:\n\t\t\t\t\tcsc = TfColorSpace.RGBA\n\t\t\t\t\tmode = \"RGBA\"\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Invalid image specification!\")\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# (continued TF type-conversion algorithm for individual images)\n\t\t\t\t#\n\t\t\t\t# If image.dtype == np.uint8:\n\t\t\t\t# pass\n\t\t\t\t# Elif image.min() >= 0:\n\t\t\t\t# image /= image.max()/255.0\n\t\t\t\t# image = image.astype(np.uint8)\n\t\t\t\t# Else:\n\t\t\t\t# image.scale( s.t. 
min >= -127 and max <= 128 )\n\t\t\t\t# image += 127\n\t\t\t\t#\n\t\t\t\tif image.dtype == np.uint8:\n\t\t\t\t\tpass\n\t\t\t\telif image.min() >= 0:\n\t\t\t\t\timage *= +255.0/image.max()\n\t\t\t\telse:\n\t\t\t\t\tfMin, fMax = abs(-127.0/image.min()), abs(+128.0/image.max())\n\t\t\t\t\timage *= np.minimum(fMin, fMax)\n\t\t\t\t\timage += +127.0\n\t\t\t\timage = image.astype(np.uint8)\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# Encode as PNG using an in-memory buffer as the \"file\" stream.\n\t\t\t\t#\n\t\t\t\t\n\t\t\t\tfrom PIL.Image import frombytes\n\t\t\t\tstream = BytesIO()\n\t\t\t\timage = frombytes(mode, (w,h), image.transpose(1,2,0).tobytes(\"C\"))\n\t\t\t\timage.save(stream, format=\"png\", optimize=True) # Always PNG -z 9\n\t\t\t\timage = stream.getvalue()\n\t\t\t\tstream.close()\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# Log the image.\n\t\t\t\t#\n\t\t\t\tval = TfImage(height = int(h),\n\t\t\t\t width = int(w),\n\t\t\t\t colorspace = int(csc),\n\t\t\t\t imageData = image).asValue(tag, metadata)\n\t\t\t\twith self._lock:\n\t\t\t\t\tself._stageValue(val)\n\t\telse:\n\t\t\traise ValueError(\"Unable to interpret image arguments!\")\n\t\t\n\t\treturn self", "def generate_images_pred(self, inputs, outputs):\n for scale in self.opt.scales:\n disp = outputs[(\"disp\", scale)]\n if self.opt.v1_multiscale:\n source_scale = scale\n else:\n # without interpolate\n if self.opt.using_v not in [3,4]:\n disp = F.interpolate(\n disp, [self.opt.height, self.opt.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth)#disp_to_depth function is in layers.py\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.opt.frame_ids[1:]):\n\n if frame_id == \"s\":\n T = inputs[\"stereo_T\"]\n else:\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n if self.opt.pose_model_type == \"posecnn\":\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n if not self.opt.disable_automasking:\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def _process_image_files_batch(\n coder: ImageCoder,\n output_file: str,\n filenames: Iterable[str],\n synsets: Iterable[Union[str, bytes]],\n labels: Mapping[str, int]):\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for filename, synset in zip(filenames, synsets):\n image_buffer, height, width = _process_image(filename, coder)\n label = labels[synset]\n example = _convert_to_example(filename, image_buffer, label,\n synset, height, width)\n writer.write(example.SerializeToString())\n\n writer.close()", "def postprocess(results, filenames, batch_size):\n if len(results) != 1:\n raise Exception(\"expected 1 result, got {}\".format(len(results)))\n\n batched_result = results[0].batch_classes\n if len(batched_result) 
!= batch_size:\n raise Exception(\"expected {} results, got {}\".format(batch_size, len(batched_result)))\n if len(filenames) != batch_size:\n raise Exception(\"expected {} filenames, got {}\".format(batch_size, len(filenames)))\n\n for (index, result) in enumerate(batched_result):\n print(\"Image '{}':\".format(filenames[index]))\n for cls in result.cls:\n print(\" {} ({}) = {}\".format(cls.idx, cls.label, cls.value))", "def collate_fn(self, batch):\n\n images = list()\n annotations = list()\n\n for b in batch:\n images.append(b[0])\n annotations.append(b[1])\n\n images = torch.stack(images, dim=0)\n\n return images, annotations # tensor (N, 3, 300, 300), 3 lists of N tensors each", "def extract_images_and_targets(read_data):\n suffix = 0\n\n images = []\n keys = []\n locations = []\n classes = []\n masks = []\n keypoints = []\n\n while fields.InputDataFields.image + str(suffix) in read_data:\n image = read_data[fields.InputDataFields.image + str(suffix)]\n key = ''\n if fields.InputDataFields.source_id in read_data:\n key = read_data[fields.InputDataFields.source_id + str(suffix)]\n location_gt = (\n read_data[fields.InputDataFields.groundtruth_boxes + str(suffix)])\n classes_gt = tf.cast(\n read_data[fields.InputDataFields.groundtruth_classes + str(suffix)],\n tf.int32)\n classes_gt -= label_id_offset\n masks_gt = read_data.get(\n fields.InputDataFields.groundtruth_instance_masks + str(suffix))\n keypoints_gt = read_data.get(\n fields.InputDataFields.groundtruth_keypoints + str(suffix))\n\n if merge_multiple_label_boxes:\n location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(\n location_gt, classes_gt, num_classes)\n else:\n classes_gt = util_ops.padded_one_hot_encoding(\n indices=classes_gt, depth=num_classes, left_pad=0)\n\n # Batch read input data and groundtruth. 
Images and locations, classes by\n # default should have the same number of items.\n images.append(image)\n keys.append(key)\n locations.append(location_gt)\n classes.append(classes_gt)\n masks.append(masks_gt)\n keypoints.append(keypoints_gt)\n\n suffix += 1\n\n return (images, keys, locations, classes, masks, keypoints)", "def get_outputs(self):\r\n return []", "def export(self, img_files, targets, output_folder, filename_prefix=\"dataset\"):\n assert isinstance(img_files, (list, tuple)), \"Arguments `img_files` should be lists or tuples\"\n if targets is not None:\n assert isinstance(targets, (list, tuple)), \"Arguments `targets` should be lists or tuples\"\n assert len(img_files) == len(\n targets\n ), \"Number of input images should be equal to the number of input targets\"\n else:\n targets = [None] * len(img_files)\n\n output = Path(output_folder)\n if not output.exists():\n output.mkdir(parents=True)\n\n n_rows = max(min(int(np.ceil(len(img_files) / self.n_cols)), self.max_n_rows), 1)\n total_width = (self.max_output_img_size[0] + self.margins[0]) * self.n_cols\n total_height = (self.max_output_img_size[0] + self.margins[0]) * n_rows\n size = (total_width, total_height)\n n_images = len(img_files)\n max_counter = n_rows * self.n_cols\n\n with get_tqdm(total=n_images) as bar:\n for c in range(0, n_images, max_counter):\n total_img = Image.new(mode=\"RGB\", size=size, color=self.background_color)\n filepath = output / (filename_prefix + \"_part_{}.png\".format(c))\n for i, (f, t) in enumerate(zip(img_files[c : c + max_counter], targets[c : c + max_counter])):\n iy, ix = np.unravel_index(i, (n_rows, self.n_cols))\n x = ix * (self.max_output_img_size[0] + self.margins[0]) + self.margins[0] // 2\n y = iy * (self.max_output_img_size[1] + self.margins[1]) + self.margins[1] // 2\n\n raw_img = self.read_img_fn(f)\n image_id = self.img_id_fn(f)\n target = self.read_target_fn(t)\n img = render_datapoint(\n raw_img,\n target,\n image_id=image_id,\n output_size=self.max_output_img_size,\n text_color=self.text_color,\n text_size=self.text_size,\n geom_color=self.geom_color,\n blend_alpha=self.blend_alpha,\n )\n total_img.paste(img, (x, y))\n bar.update(1)\n total_img.save(filepath.as_posix())", "def gather_images(datasets, batch_img_paths):\r\n n_batch = len(batch_img_paths)\r\n\r\n images = [[] for d in datasets]\r\n image_idx = [[] for d in datasets]\r\n\r\n for img_path in batch_img_paths:\r\n\r\n img_path_idx = index_by_path(datasets, img_path) \r\n\r\n for j, path_idx in enumerate(img_path_idx):\r\n\r\n images[j].extend(load_dataset_images(datasets[j][path_idx[0]], path_idx[1], 1))\r\n image_idx[j].append(path_idx[0]) # the model/dataset that the image is mapped to\r\n\r\n return images, image_idx", "def batchify(batch):\n\n \"\"\"Gather a batch of individual examples into one batch.\"\"\"\n NUM_INPUTS = 3\n NUM_TARGETS = 2\n NUM_EXTRA = 1\n\n ids = [ex[-1] for ex in batch]\n docs = [ex[0] for ex in batch]\n features = [ex[1] for ex in batch]\n questions = [ex[2] for ex in batch]\n\n # Batch documents and features\n max_length = max([d.size(0) for d in docs])\n docs_indices = torch.LongTensor(len(docs), max_length).zero_()\n docs_mask = torch.ByteTensor(len(docs), max_length).fill_(1)\n if features[0] is None:\n docs_feature = None\n else:\n docs_feature = torch.zeros(len(docs), max_length, features[0].size(1))\n for i, d in enumerate(docs):\n docs_indices[i, :d.size(0)].copy_(d)\n docs_mask[i, :d.size(0)].fill_(0)\n if docs_feature is not None:\n docs_feature[i, 
:d.size(0)].copy_(features[i])\n\n # Batch questions\n max_length = max([q.size(0) for q in questions])\n questions_indices = torch.LongTensor(len(questions), max_length).zero_()\n questions_mask = torch.ByteTensor(len(questions), max_length).fill_(1)\n for i, q in enumerate(questions):\n questions_indices[i, :q.size(0)].copy_(q)\n questions_mask[i, :q.size(0)].fill_(0)\n\n # Maybe return without targets\n if len(batch[0]) == NUM_INPUTS + NUM_EXTRA:\n return docs_indices, docs_feature, docs_mask, questions_indices, questions_mask, ids\n\n elif len(batch[0]) == NUM_INPUTS + NUM_EXTRA + NUM_TARGETS:\n # ...Otherwise add targets\n if torch.is_tensor(batch[0][3]):\n start = torch.cat([ex[3] for ex in batch])\n end = torch.cat([ex[4] for ex in batch])\n else:\n start = [ex[3] for ex in batch]\n end = [ex[4] for ex in batch]\n else:\n raise RuntimeError('Incorrect number of inputs per example.')\n\n return docs_indices, docs_feature, docs_mask, questions_indices, questions_mask, start, end, ids", "def get_result(i=None, targets='all'):", "def compute_targets(self, image_group, annotations_group):\n \"\"\"\n Compute target outputs for the network using images and their annotations.\n \"\"\"\n\n batches_targets = anchors.anchor_targets_bbox(\n self.anchors,\n image_group,\n annotations_group,\n num_classes=self.num_classes(),\n )\n return list(batches_targets)", "def get_outputs(self, inputs):\n\n # first layer\n out = get_outputs(self.layers[0], inputs)\n\n for i in range(1, len(self.layers)):\n out = get_outputs(self.layers[i], out)\n\n return out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the prompt to use for VQA evaluation. If the answer is not provided, it should be left blank to be generated by the model.
def vqa_prompt(self, question, answer=None) -> str:
[ "def prompt(self) -> str:\n self._logger.info(\"Retrieving voice prompts setting...\")\n raise NotImplementedError(\"Prompt() is not implemented yet.\")", "def eval_prompt(self, input):\n return input", "def _get_select_question_input(): # pragma: no cover\n questions = [\n inquirer.List('answer',\n message='Do you wanna select this paper?',\n choices=[\n 'Skip', \n 'No', \n 'Yes', \n 'Save what I\\'ve done so far and leave'],\n ),\n ]\n return inquirer.prompt(questions).get('answer')", "def prompt(self):\n\t\t_globals._console.write(f'{self.prompt_str} ')", "def prompt(self):\n return self.eval_prompt(self.env.get('PS2', u'> ')\n if self.is_multiline else self.env.get('PS1', u'% '))", "def base_prompt(self):\r\n return self._base_prompt", "def ask(question, default = None):\n if default:\n question += \" (default: \" + default + \"): \"\n answer = raw_input(question)\n if answer != \"\":\n return answer\n else:\n return default", "def ask_question(question, input_type, default=None, hide_input=False):\n if default:\n return click.prompt(\n question, type=input_type, default=default, hide_input=hide_input)\n return click.prompt(question, type=input_type, hide_input=hide_input)", "def step_see_prompt(context):\n context.cli.expect('wharfee> ')", "def get_param(prompt_string):\n\tscreen.clear()\n\tscreen.border(0)\n\tscreen.addstr(2, 2, prompt_string)\n\tscreen.refresh()\n\tinput = screen.getstr(10, 10, 60)\n\treturn input", "def selectionPrompt(nodetypecategory, multisel = True,\n whichprompt = 0):\n return ''", "def test_prompting(self):\n pass", "def show_question(self):\n\t\tprint(question)", "def display_prompt() -> str:\r\n user_input = input(\"\\nL)oad image S)ave-as \\n\" \r\n + \"2)-tone 3)tone X)treme contrast T)int sepia P)osterize \\n\" \r\n + \"E)dge detect I)mproved edge detect V)ertical flip H)orizontal flip \\n\" \r\n + \"Q)uit \\n\" \r\n + \": \"\r\n ) \r\n user_input = user_input.upper()\r\n return user_input", "def show_question(self):\n print(question)", "def question(self, question):\n if self.use_STT:\n self.say(question)\n response = STT.wait_for_voice()\n else:\n naoqiutils.speak(question)\n response = raw_input(question + \"\\n> \")\n return response", "def step_expect_prompt(context):\n context.cli.expect('wharfee> ')", "def input(prompt=None):\n if prompt:\n sys.stderr.write(str(prompt))\n return builtins.input()", "def prompt(self, message):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the prompt to use for caption evaluation. If the caption is not provided, it should be left blank to be generated by the model.
def caption_prompt(self, caption=None) -> str:
[ "def caption(self) -> str:\n return self.attributes.get('caption', str(self.path))", "def prompt(self) -> str:\n self._logger.info(\"Retrieving voice prompts setting...\")\n raise NotImplementedError(\"Prompt() is not implemented yet.\")", "def vqa_prompt(self, question, answer=None) -> str:", "def display_prompt() -> str:\r\n user_input = input(\"\\nL)oad image S)ave-as \\n\" \r\n + \"2)-tone 3)tone X)treme contrast T)int sepia P)osterize \\n\" \r\n + \"E)dge detect I)mproved edge detect V)ertical flip H)orizontal flip \\n\" \r\n + \"Q)uit \\n\" \r\n + \": \"\r\n ) \r\n user_input = user_input.upper()\r\n return user_input", "def prompt(self):\n return self.eval_prompt(self.env.get('PS2', u'> ')\n if self.is_multiline else self.env.get('PS1', u'% '))", "def base_prompt(self):\r\n return self._base_prompt", "def get_string(title, prompt='Enter a string', initial=''):\n from tkinter.simpledialog import askstring\n return askstring(title, prompt, initialvalue=initial)", "def get_pick_prompt(cls, ctx: RoleActionContext) -> Message:\n return messages.PICK_PROMPT[cls.name]", "def caption(self) -> str:\n for prop in self.schema.caption:\n for value in self.get(prop):\n return value\n return self.schema.label", "def prompt(self):\n\t\t_globals._console.write(f'{self.prompt_str} ')", "def _update_prompt(self):\n\n elements = [localizer.GR + os.getcwd()]\n if self._params.capture:\n capture = (self._params.capture[:7] + '..') if len(self._params.capture) > 9 else self._params.capture\n elements.append(localizer.G + capture)\n if self._params.iface is not None:\n elements.append(localizer.C + self._params.iface)\n if self._params.duration > 0:\n elements.append(localizer.GR + str(self._params.duration) + 's')\n\n separator = localizer.W + ':'\n self.prompt = separator.join(elements) + localizer.W + '> '", "def input_text(thePrompt: str, theInputWidth: int, theDefaultInput: str = None, **kwds):\n box = Dialog(**kwds)\n d = box.margin\n\n def ok():\n box.dismiss(True)\n\n def cancel():\n box.dismiss(False)\n\n lb = Label(thePrompt)\n lb.topleft = (d, d)\n tf = TextField(theInputWidth)\n if theDefaultInput:\n tf.set_text(theDefaultInput)\n tf.enter_action = ok\n tf.escape_action = cancel\n tf.top = lb.top\n tf.left = lb.right + 5\n box.add(lb)\n box.add(tf)\n tf.focus()\n box.shrink_wrap()\n if box.present():\n return tf.get_text()\n else:\n return None", "def prompt_experiment(experiments):\n completer = WordCompleter(experiments)\n exp = ''\n key = 'default'\n while exp not in experiments:\n exp = prompt(\"Experiment: \", toolbar_msg_key=key, completer=completer)\n key = 'experiment'\n return exp", "def get_current_text(self) -> Optional[SccCaptionText]:\n return self._current_text", "def eval_prompt(self, input):\n return input", "def setCaption(self, caption):", "def prompt_choice(prompt_text: str, choices: Iterable[str], default: str = ''):\n completer = WordCompleter(choices)\n choices_text = ', '.join(choices)\n return prompt(\n prompt_text,\n hint=choices_text,\n default=default,\n completer=completer,\n validator=is_one_of(choices))", "def _build_prompt_string(self) -> str:\n remote = \"\"\n\n if self._client.is_connected_to_server():\n remote = self._client.connection.server_info.get(\"name\")\n\n if self._client.is_connected_to_sharing():\n remote += \".{}:{}\".format(\n self._client.connection.current_sharing_name(),\n self._client.connection.current_rcwd()\n )\n\n local = os.getcwd()\n\n sep = (\" \" + 2 * self._prompt_local_remote_sep + \" \") if remote else \"\"\n\n styled = 
is_styling_enabled()\n R = ansi.RESET if styled else \"\"\n B = ansi.ATTR_BOLD if styled else \"\"\n M = ansi.FG_MAGENTA if styled else \"\"\n C = ansi.FG_CYAN if styled else \"\"\n IS = ansi.RL_PROMPT_START_IGNORE if styled else \"\"\n IE = ansi.RL_PROMPT_END_IGNORE if styled else \"\"\n\n # Escape sequence must be wrapped into \\001 and \\002\n # so that readline can handle those well and deal with terminal/prompt\n # width properly\n # use a leading DELETE_EOL for overwrite eventual previously printed ^C\n # (won't overwrite the previous prompt since KeyboardInterrupt is captured\n # and prints a new line)\n # prompt = IS + ansi.RESET_LINE + IE + \\\n\n prompt = \\\n ((IS + B + M + IE + remote + IS + R + IE) if remote else \"\") + \\\n ((IS + B + IE + sep + IS + R + IE) if sep else \"\") + \\\n IS + B + C + IE + local + IS + R + IE + \\\n IS + B + IE + \"> \" + IS + R + IE\n\n return prompt", "def test_caption(self):\n img_div = pq_img(self.p, '[[Image:img file.png|my caption]]')\n img = img_div('img')\n caption = img_div.text()\n\n eq_('/img/wiki_up/img%20file.png', img.attr('src'))\n eq_('my caption', img.attr('alt'))\n eq_('my caption', caption)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attach a callback to the ignite engine; the attached method will be called at the end of each epoch
def attach(self, engine: ignite.engine.Engine): engine.add_event_handler(ignite.engine.Events.EPOCH_COMPLETED, self.store) engine.add_event_handler(ignite.engine.Events.ITERATION_COMPLETED, self.store)
[ "def every_after_train_step_callback_fn(self, sess):\n pass", "def after_train_iter(self, trainer):\n self.after_iter(trainer)", "def every_before_train_step_callback_fn(self, sess):\n pass", "def addInstanceAddedCallback(*args, **kwargs):\n \n pass", "def before_train_epoch(self, trainer):\n self.before_epoch(trainer)", "def epoch_finished(self):\n pass", "def register_epoch_callbacks(self, *callbacks):\n self.epoch_callbacks.extend(callbacks)", "def listener_callback(self):\n raise AssertionError(\"listener_callback is not implemented\")", "def on_epoch_start(self, X: Dict[str, Any], epoch: int) -> None:\n pass", "def pre_epoch(self):\n pass", "def on_eval_epoch_end(self, state: State) -> None:\n pass", "def callback_stopping(self, myrun):\n pass # pragma: no cover", "def epoch_finished(self):\n super(MiniBatchTrainer, self).epoch_finished()\n self.model.total_epochs += 1\n log.info('finished epoch %d, handled %d instances in total' %\n (self.model.total_epochs, self.model.total_examples))", "def register(self, callback):\n self.callbacks.append(callback)", "def on_train_end(self, logs=None, **kwargs):\n for callback in self.callbacks:\n callback.on_train_end(logs, **kwargs)", "def _register_engine_hooks(self, engine):\n pass", "def on_online(self, func):\n self._on_online = func", "def registerGamePreparationsEndedCallback(self, callback):\n\n\t\tself.__gamePreparationsEndedCallbacks.append(callback)", "def runCallback(self):\n if self._callBack is None:\n _LOGGER.warning(\"This blind not register callback function\")\n return\n self._callBack()", "def on_epoch_end(self):\n self.current_elbo = self.model.get_elbo()\n self.current_epoch += 1\n self.elbos += [self.current_elbo]\n self.epochs += [self.current_epoch]\n if self.verbose:\n print(\n \"Epoch {} \\tELBO: {}\".format(\n self.current_epoch, self.current_elbo\n )\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compress and chunk a NetCDF file with NCO, using lossless deflation. We save in the "netcdf4" format because chunking is only supported in that format.
def compress_and_chunk(in_file, out_file): if not os.path.isfile(in_file): raise FileNotFoundError(f"Cannot find input file '{in_file}'.") if skip(in_file, out_file): return out_file if not shutil.which('ncks'): raise RuntimeError(f'The command `ncks` is not in the PATH.') opts = yaml.load(open('options.yaml')) compression_level = opts['compression_level'] chunk_lon = opts['chunks']['lon'] chunk_lat = opts['chunks']['lat'] chunk_time = opts['chunks']['time'] chunk_cache = opts['chunks']['cache'] cprint(f"Compressing and chunking file '{in_file}'...", 'yellow') try: subprocess.run(['ncks', '--deflate', str(compression_level), '--chunk_dimension', f'lon,{chunk_lon}', '--chunk_dimension', f'lat,{chunk_lat}', '--chunk_dimension', f'time,{chunk_time}', '--chunk_cache', str(chunk_cache), '--fl_fmt', 'netcdf4', in_file, out_file], check=True) except Exception: if os.path.isfile(out_file): cprint(f"Removing file '{out_file}'.", 'red') os.remove(out_file) raise assert(os.path.isfile(out_file)) cprint(f"Successfully created file: '{out_file}'", 'green') return out_file
[ "def _netCDF4_deflate(outnetcdf):\n cmd = ['ncks', '-4', '-L4', '-O', outnetcdf, outnetcdf]\n try:\n lib.run_in_subprocess(cmd, logger.debug, logger.error)\n logger.debug(f'netCDF4 deflated {outnetcdf}')\n except WorkerError:\n raise", "def create_intermediate_netcdf(output_name, chl_lons, chl_lats):\n global chunk_size\n global zlib_compression\n ds = nc.Dataset(output_name,'w',format='NETCDF4_CLASSIC')\n ds.createDimension('LONGITUDE', chl_lons.shape[0])\n ds.createDimension('LATITUDE', chl_lats.shape[0])\n ds.createDimension('DEPTH', 1)\n ds.createDimension('TIME', None)\n ds.createVariable('LATITUDE', 'float64', dimensions=['LATITUDE'], zlib=zlib_compression,)\n ds.variables['LATITUDE'].setncattr(\"units\", \"degrees_north\")\n ds.variables['LATITUDE'][:] = chl_lats\n ds.createVariable('LONGITUDE', 'float64', dimensions=['LONGITUDE'], zlib=zlib_compression,)\n ds.variables['LONGITUDE'].setncattr(\"units\", \"degrees_east\")\n ds.variables['LONGITUDE'][:] = chl_lons\n ds.createVariable('DEPTH', 'float32', dimensions=['DEPTH'], zlib=zlib_compression,)\n ds.variables['DEPTH'].setncattr(\"units\", \"meters\")\n ds.variables['DEPTH'].setncattr(\"positive\", \"down\")\n ds.variables['DEPTH'][:] = [0.1]\n ds.createVariable('TIME', 'float32', dimensions=['TIME'], zlib=zlib_compression,)\n ds.variables['TIME'].setncattr(\"units\", \"years\")\n ds.createVariable('zen', 'float32', dimensions=['TIME', 'LATITUDE', 'LONGITUDE'],fill_value=FILL_VAL, zlib=zlib_compression)\n ds.variables['zen'].setncattr(\"units\", \"degrees\")\n ds.createVariable('filled_chl', 'float32', dimensions=['TIME', 'DEPTH', 'LATITUDE', 'LONGITUDE'],fill_value=FILL_VAL, zlib=zlib_compression)\n ds.variables['filled_chl'].setncattr(\"units\", \"mg chl m^3\")\n ds.createVariable('anomaly', 'float32', dimensions=['TIME', 'DEPTH', 'LATITUDE', 'LONGITUDE'],fill_value=FILL_VAL, zlib=zlib_compression)\n ds.variables['anomaly'].setncattr(\"units\", \"mg chl m^3\")\n ds.createVariable('chl_cumsum', 'float32', dimensions=['TIME', 'DEPTH', 'LATITUDE', 'LONGITUDE'],fill_value=FILL_VAL, zlib=zlib_compression)\n ds.variables['chl_cumsum'].setncattr(\"units\", \"mg chl m^3\")\n ds.createVariable('chl_der', 'float32', dimensions=['TIME', 'DEPTH', 'LATITUDE', 'LONGITUDE'],fill_value=FILL_VAL, zlib=zlib_compression)\n ds.variables['chl_der'].setncattr(\"units\", \"mg chl m^3\")\n ds.createVariable('chl_boxcar', 'float32', dimensions=['TIME', 'DEPTH', 'LATITUDE', 'LONGITUDE'],fill_value=FILL_VAL, zlib=zlib_compression)\n ds.variables['chl_boxcar'].setncattr(\"units\", \"mg chl m^3\")\n ds.createVariable('sst_boxcar', 'float32', dimensions=['TIME', 'DEPTH', 'LATITUDE', 'LONGITUDE'],fill_value=FILL_VAL, zlib=zlib_compression)\n ds.variables['sst_boxcar'].setncattr(\"units\", \"degrees celsius\")\n ds.createVariable('sst_der', 'float32', dimensions=['TIME', 'DEPTH', 'LATITUDE', 'LONGITUDE'],fill_value=FILL_VAL, zlib=zlib_compression)\n ds.variables['sst_der'].setncattr(\"units\", \"degrees celsius\")\n ds.close()", "def writenc(mcdData,ncGeo,clon,clat,options,xchunk=150, ychunk=200):\n\n # Gridded Dimensions\n # ------------------\n nNS, nEW = clon.shape\n\n # Open NC file\n # ------------\n nc = Dataset(options.outFile,'w',format=options.format)\n\n # Set global attributes\n # ---------------------\n nc.title = options.title\n nc.institution = 'NASA/Goddard Space Flight Center'\n nc.source = 'Global Model and Assimilation Office'\n nc.history = 'Created from MCD43C1 v005 collections by mcd43c_sampler.py'\n nc.references = 'n/a'\n nc.comment = 
'This file contains BRDF Kernels weights for the RTLS model for 8 MODIS bands sampled on a geostationary grid'\n nc.contact = 'Patricia Castellanos <patricia.castellanos@nasa.gov>'\n nc.Conventions = 'CF' \n nc.BAND1 = \"620-670nm\"\n nc.BAND2 = \"841-875nm\"\n nc.BAND3 = \"459-479nm\"\n nc.BAND4 = \"545-565nm\"\n nc.BAND5 = \"1230-1250nm\"\n nc.BAND6 = \"1628-1652nm\"\n nc.BAND7 = \"2105-2155nm\"\n\n # Create dimensions\n # -----------------\n x = nc.createDimension('ew',nEW)\n y = nc.createDimension('ns',nNS)\n k = nc.createDimension('Kernels',Kernels)\n\n # Add pseudo dimensions for GrADS compatibility\n # -------------------------------------------\n _copyVar(ncGeo,nc,'ew',dtype='f4',zlib=False)\n _copyVar(ncGeo,nc,'ns',dtype='f4',zlib=False)\n\n # Save lon/lat if so desired\n # --------------------------\n if options.coords:\n _copyVar(ncGeo,nc,'clon',dtype='f4',zlib=False)\n _copyVar(ncGeo,nc,'clat',dtype='f4',zlib=False)\n\n # Loop over Bands writing each dataset\n #---------------------------------------\n dim = ('Kernels','ns','ew')\n chunks = (1,ychunk, xchunk)\n for b in outbands:\n this = nc.createVariable(bandname[b],'f4',dim,\n zlib=options.zlib,\n chunksizes=chunks) \n\n this.long_name = bandname[b] + ' BRDF Kernel weight: isotropic, volumetric, geometric'\n this.missing_value = -99999\n this.unit = 'none' \n\n data = np.ma.masked_all([Kernels,nNS,nEW])\n temp = np.ma.masked_all([nNS,nEW]) \n for i,k in enumerate(kernel_names): \n temp[~clon.mask] = getattr(mcdData,k + '_' + b) \n data[i,:,:] = temp\n\n this[:] = data\n\n\n nc.close()", "def chunk_by_chunk(chunk_idx, chunk):\n global chl_ds\n global args\n global start_date\n global debug_chunk\n global do_only_debug_chunk\n global output_folder\n\n chunk_start_date = start_date\n\n #if we have been instructed to skip everything then do so\n if do_only_debug_chunk and not chunk_idx == debug_chunk:\n return True\n\n slc = [slice(None)] * len(chl_ds.variables[chl_variable].shape)\n x,y = chunk\n y = y if y[0] != 1 else (0, y[1])\n x = x if x[0] != 1 else (0, x[1])\n slc[LON_IDX] = slice(x[0], x[1])\n slc[LAT_IDX] = slice(y[0], y[1])\n\n output = os.path.join(output_folder, os.path.basename(chl_filename).replace(\".nc\", \"_phenology_{}_chunk{}.nc\".format(time_of_run, chunk_idx)))\n \n chl_lock.acquire()\n chl_array = chl_ds.variables[chl_variable][slc]\n\n chl_lons = chl_ds.variables[chl_lon_var][x[0]:x[1]]\n chl_lats = chl_ds.variables[chl_lat_var][y[0]:y[1]]\n #mask if it isn't, else will raise errors\n chl_array = numpy.ma.masked_array(chl_array, numpy.isnan(chl_array))\n chl_array = numpy.ma.masked_where(chl_array == chl_array.fill_value, chl_array)\n chl_array = numpy.ma.masked_where(chl_array <= 0, chl_array)\n chl_array = numpy.ma.masked_invalid(chl_array)\n if chl_array.mask.all():\n logger.info(numpy.isnan(chl_array).all())\n logger.info(\"skipping as empty\")\n chl_lock.release()\n #output empty netcdf\n empty_med = numpy.empty((chl_lats.shape[0], chl_lons.shape[0]), dtype=chl_array.dtype) if LAT_IDX >LON_IDX else numpy.empty(shape=(chl_lons.shape[0], chl_lats.shape[0]), dtype=chl_array.dtype)\n create_phenology_netcdf(chl_lons, chl_lats, [1,1], output.replace(\".nc\", \"_by_maxval.nc\"), median=empty_med, std=empty_med, max_means=empty_med)\n create_phenology_netcdf(chl_lons, chl_lats, [1,1], output.replace(\".nc\", \"_by_date.nc\"), median=empty_med, std=empty_med, max_means=empty_med)\n create_phenology_netcdf(chl_lons, chl_lats, [1,1], output.replace(\".nc\", \"_by_duration.nc\"), median=empty_med, 
std=empty_med, max_means=empty_med)\n return True\n chl_lock.release()\n if len(chl_array.shape) == 3:\n logger.info(\"reshaping chl to {}\".format((chl_array.shape[0], 1, chl_array.shape[1], chl_array.shape[2])))\n chl_array.shape = (chl_array.shape[0], 1, chl_array.shape[1], chl_array.shape[2])\n \n logger.info(\"making temp storage\")\n\n \"\"\"\n if not (chl_array.shape[2] == chl_lats.shape[0] and chl_array.shape[3] == chl_lons.shape[0]):\n logger.info(\"adjusting to flip lat and lon\")\n chl_array.shape = (chl_array.shape[0], chl_array.shape[1], chl_array.shape[3], chl_array.shape[2])\n \"\"\"\n if args.extend_chl_data:\n chl_array, chunk_start_date = extend_array(chl_array, date_seperation_per_year, chunk_start_date)\n logger.info(\"start date after extension: {}\".format(chunk_start_date))\n if args.sst_location:\n sst_lock.acquire()\n logger.info(\"sst file provided, reading array\")\n logger.info(\"only one file found, assuming full stack of observations\")\n sst_ds = nc.Dataset(args.sst_location)\n try:\n sst_variable = [x for x in sst_ds.variables if args.sst_var.strip().lower() in x.strip().lower()][0]\n except IndexError:\n raise BlockingException(\"Could not find the sst var requested '{}'\\nthis is a blocking error, check the requested variable name matches the variable in the netcdf.\".format(args.sst_var))\n sst_lon_var, sst_lat_var, sst_time_var = ds_to_dim_vars(sst_ds, lat_search=args.lat_var, lon_search=args.lon_var, time_search=args.time_var)\n sst_lons = sst_ds.variables[sst_lon_var][:]\n sst_lats = sst_ds.variables[sst_lat_var][:]\n sst_time = sst_ds.variables[sst_time_var][:]\n SST_LAT_IDX = sst_ds.variables[sst_variable].dimensions.index(sst_lat_var)\n SST_LON_IDX = sst_ds.variables[sst_variable].dimensions.index(sst_lon_var)\n SST_TIME_IDX = sst_ds.variables[sst_variable].dimensions.index(sst_time_var)\n sst_slc = [slice(None)] * len(sst_ds.variables[sst_variable].shape)\n sst_slc[SST_LON_IDX] = slice(x[0], x[1])\n sst_slc[SST_LAT_IDX] = slice(y[0], y[1])\n sst_slc[SST_TIME_IDX] = slice(args.sst_start_index, sst_time.shape[0])\n sst_array = sst_ds.variables[sst_variable][sst_slc]\n logger.info(\"sst array shape at read:{}\".format(sst_array.shape))\n try:\n sst_fill_val = sst_ds.variables[sst_variable]._FillValue\n except:\n sst_fill_val = FILL_VAL\n \n if not numpy.ma.is_masked(sst_array):\n logger.debug(\"sst is not a masked array, masking it\")\n sst_array = numpy.ma.masked_array(sst_array, numpy.isnan(sst_array))\n sst_array = numpy.ma.masked_where(sst_array == sst_fill_val, sst_array)\n sst_lock.release()\n\n if args.extend_sst_data:\n logger.debug(\"extending sst array by repeating year at start and end\")\n sst_array, _ = extend_array(sst_array, date_seperation_per_year, start_date)\n\n if len(sst_array.shape) == 3:\n logger.info(\"reshaping sst to {}\".format((sst_array.shape[0], 1, sst_array.shape[1], sst_array.shape[2])))\n sst_array.shape = (sst_array.shape[0], 1, sst_array.shape[1], sst_array.shape[2])\n \n chl_shape, chl_dtype, chl_median, chl_std_dev, chl_max_means, perc_val_change, filled_chl = prepare_chl_variables(chl_array, \n chunk_idx, \n date_seperation_per_year, \n chl_lats, \n chl_lons, \n modelled_threshold=args.modelled_threshold, \n median_threshold=med_thresh, \n median_filename=chl_filename.replace(\".nc\", \n \"_median_{}_chunk{}.nc\".format(time_of_run,\n chunk_idx)\n ),\n chunk_idx=chunk_idx,\n std_dev_anomaly=do_std_dev,\n std_dev_threshold=std_dev_threshold,\n relative_max_anomaly=do_rel_max,\n 
max_means_threshold=max_means_threshold,\n max_means_anomaly=do_max_means,\n relative_median_anomaly=do_rel_med_anomaly,\n do_fill_chl=fill_chl,\n output_name=output,\n date_seperation_per_year=date_seperation_per_year,\n fill_filter_time_steps=args.fill_time_filter_steps,\n filter_spatial_steps=args.spatial_filter_steps,\n sbx_filter_time_steps=args.time_filter_steps)\n logger.info(\"chl_shape: {}\".format(chl_shape))\n logger.info(\"chl_dtype: {}\".format(USE_DTYPE))\n\n chl_ngd = get_chl_ngd(chunk_start_date, date_seperation_per_year, filled_chl)\n\n if args.sst_location:\n sst_shape, sst_dtype = prepare_sst_variables(sst_array, chunk_idx, skip=args.skip_sst_prep,chunk_idx=chunk_idx, output_name=output, filter_time_steps=args.sst_time_filter_steps, date_seperation=date_seperation_per_year)\n logger.info(\"sst_shape: {}\".format(sst_shape))\n logger.info(\"sst_dtype: {}\".format(USE_DTYPE))\n sst_array = None\n\n if sst_shape[2:] != chl_shape[2:]:\n logger.error(\"sst and chlorophyll x,y array shapes do not match got:\")\n logger.error(\"chlorophyll: {}\".format(chl_shape[2:]))\n logger.error(\"sst: {}\".format(sst_shape[2:]))\n logger.error(\"quitting!\")\n sys.exit()\n if not isinstance(filled_chl.mask, numpy.ndarray):\n logger.error(filled_chl.mask)\n logger.error(\"chl data has no mask for its variable data, this will completely break our logic\")\n logger.error(filled_chl)\n raise Exception(\"chl data has no mask for its variable data, this will completely break our logic\")\n #create the output files\n create_phenology_netcdf(chl_lons, chl_lats, chl_shape, output.replace(\".nc\", \"_by_maxval.nc\"), median=chl_median, std=chl_std_dev, max_means=chl_max_means, perc_val_change=perc_val_change)\n create_phenology_netcdf(chl_lons, chl_lats, chl_shape, output.replace(\".nc\", \"_by_date.nc\"), median=chl_median, std=chl_std_dev, max_means=chl_max_means, perc_val_change=perc_val_change)\n create_phenology_netcdf(chl_lons, chl_lats, chl_shape, output.replace(\".nc\", \"_by_duration.nc\"), median=chl_median, std=chl_std_dev, max_means=chl_max_means, perc_val_change=perc_val_change)\n logger.info(\"using start date {}\".format(chunk_start_date))\n get_multi_year_two_blooms_output(output,\n chunk_idx,\n chl_shape,\n chl_dtype, \n filled_chl, \n sst_shape, \n sst_dtype, \n date_seperation_per_year=date_seperation_per_year, \n start_date=chunk_start_date, \n reverse_search=reverse_search,\n reference_index=ref_index,\n out_date_netcdf=output.replace(\".nc\", \"_by_date.nc\"),\n out_duration_netcdf=output.replace(\".nc\", \"_by_duration.nc\"),\n out_netcdf=output.replace(\".nc\", \"_by_maxval.nc\"),\n chunk_idx=chunk_idx)\n \n logger.setLevel(default_logging)\n return True", "def create_empty_netcdf(fn, product_name=None, product_config_dict=None):\n\n if not product_name and not product_config_dict:\n raise ValueError(\n \"Either product_name or product_config_dict \" \"have to be supplied.\"\n )\n elif not product_name and product_config_dict:\n raise ValueError(\n \"A product_name has to be supplied when supplying \" \"a product_config_dict.\"\n )\n elif product_name and not product_config_dict:\n product_config_dict = radolan_product_netcdf_config[product_name]\n else:\n pass\n\n with netCDF4.Dataset(fn, \"w\") as nc_fh:\n n_lons = product_config_dict[\"metadata_fixed\"][\"n_lons\"]\n n_lats = product_config_dict[\"metadata_fixed\"][\"n_lats\"]\n\n # Get RADOLAN coordinates\n radolan_xy_grids = wrl.georef.get_radolan_grid(ncols=n_lons, nrows=n_lats)\n radolan_x = radolan_xy_grids[0, :, 0]\n 
radolan_y = radolan_xy_grids[:, 0, 1]\n radolan_lat_lon_grids = wrl.georef.get_radolan_grid(\n ncols=n_lons, nrows=n_lats, wgs84=True\n )\n radolan_lons = radolan_lat_lon_grids[:, :, 0]\n radolan_lats = radolan_lat_lon_grids[:, :, 1]\n\n # create dimensions\n nc_fh.createDimension(\"x\", n_lons)\n nc_fh.createDimension(\"y\", n_lats)\n nc_fh.createDimension(\"time\", None)\n\n # create the variables we need in all files\n nc_fh.createVariable(\"x\", \"f8\", (\"x\"))\n nc_fh.createVariable(\"y\", \"f8\", (\"y\"))\n nc_fh.createVariable(\"latitudes\", \"f8\", (\"y\", \"x\"))\n nc_fh.createVariable(\"longitudes\", \"f8\", (\"y\", \"x\"))\n nc_fh.createVariable(\"time\", \"f8\", (\"time\"))\n\n # create the individual specified variables with their attributes\n for variable_name, variable_config in product_config_dict[\"variables\"].items():\n variable_parameters = variable_config[\"variable_parameters\"].copy()\n nc_var = nc_fh.createVariable(\n varname=variable_name,\n datatype=variable_parameters.pop(\"datatype\"),\n **variable_parameters\n )\n nc_var.setncatts(variable_config[\"attributes\"])\n\n # create variables for the metadata that changes per time stamp\n for variable_name, variable_config in product_config_dict[\n \"metadata_per_timestamp\"\n ].items():\n variable_parameters = variable_config[\"variable_parameters\"].copy()\n nc_var = nc_fh.createVariable(\n varname=variable_name,\n datatype=variable_parameters.pop(\"datatype\"),\n **variable_parameters\n )\n nc_var.setncatts(variable_config[\"attributes\"])\n\n nc_fh.set_auto_maskandscale(True)\n\n # variable attributes\n nc_fh[\"time\"].long_name = \"Time\"\n nc_fh[\"time\"].standard_name = \"time\"\n nc_fh[\"time\"].units = \"hours since 2000-01-01 00:50:00.0\"\n nc_fh[\"time\"].calendar = \"standard\"\n\n nc_fh[\"x\"].long_name = \"RADOLAN Grid x coordinate of projection\"\n nc_fh[\"x\"].standard_name = \"projection_x_coordinate\"\n nc_fh[\"x\"].units = \"km\"\n\n nc_fh[\"y\"].long_name = \"RADOLAN Grid y coordinate of projection\"\n nc_fh[\"y\"].standard_name = \"projection_y_coordinate\"\n nc_fh[\"y\"].units = \"km\"\n\n nc_fh[\"latitudes\"].long_name = \"Latitude\"\n nc_fh[\"latitudes\"].standard_name = \"latitude\"\n nc_fh[\"latitudes\"].units = \"degrees_north\"\n\n nc_fh[\"longitudes\"].long_name = \"Longitude\"\n nc_fh[\"longitudes\"].standard_name = \"longitude\"\n nc_fh[\"longitudes\"].units = \"degrees_east\"\n\n # global attributes\n nc_fh.title = \"RADOLAN %s rainfall data\" % product_name\n nc_fh.producttype = product_name\n # nc_fh.source = 'ftp://ftp-cdc.dwd.de/pub/CDC/grids_germany/hourly/radolan/'\n nc_fh.institution = \"Deutscher Wetterdienst (DWD)\"\n nc_fh.history = \"Created at \" + str(datetime.utcnow())\n nc_fh.Conventions = \"CF-1.6\"\n\n # Add actual coordinate data\n nc_fh[\"latitudes\"][:, :] = radolan_lats\n nc_fh[\"longitudes\"][:, :] = radolan_lons\n nc_fh[\"x\"][:] = radolan_x\n nc_fh[\"y\"][:] = radolan_y\n\n # Add projection definition\n nc_fh.createVariable(\"radolan_grid\", \"f8\")\n nc_fh[\"radolan_grid\"].long_name = \"RADOLAN Grid\"\n nc_fh[\"radolan_grid\"].grid_mapping_name = \"polar_stereographic\"\n nc_fh[\"radolan_grid\"].semi_major_axis = 6370040.0\n nc_fh[\"radolan_grid\"].false_easting = 0.0\n nc_fh[\"radolan_grid\"].false_northing = 0.0\n nc_fh[\"radolan_grid\"].scale_factor_at_projection_origin = 0.9330127019\n nc_fh[\"radolan_grid\"].straight_vertical_longitude_from_pole = 10.0\n nc_fh[\"radolan_grid\"].latitude_of_projection_origin = 90.0", "def _binary_c2nc(file_in, file_out, 
quantity):\n\n columns = 3 # long, lat , depth\n mux_file = open(file_in, 'rb')\n\n # Number of points/stations\n (points_num,) = unpack('i', mux_file.read(4))\n\n # nt, int - Number of time steps\n (time_step_count,) = unpack('i', mux_file.read(4))\n\n #dt, float - time step, seconds\n (time_step,) = unpack('f', mux_file.read(4))\n\n msg = \"Bad data in the mux file.\"\n if points_num < 0:\n mux_file.close()\n raise ANUGAError(msg)\n if time_step_count < 0:\n mux_file.close()\n raise ANUGAError(msg)\n if time_step < 0:\n mux_file.close()\n raise ANUGAError(msg)\n\n lonlatdep = p_array.array('f')\n lonlatdep.read(mux_file, columns * points_num)\n lonlatdep = num.array(lonlatdep, dtype=float)\n lonlatdep = num.reshape(lonlatdep, (points_num, columns))\n\n lon, lat, depth = lon_lat2grid(lonlatdep)\n lon_sorted = list(lon)\n lon_sorted.sort()\n\n if not num.alltrue(lon == lon_sorted):\n msg = \"Longitudes in mux file are not in ascending order\"\n raise IOError(msg)\n\n lat_sorted = list(lat)\n lat_sorted.sort()\n\n nc_file = Write_nc(quantity,\n file_out,\n time_step_count,\n time_step,\n lon,\n lat)\n\n for i in range(time_step_count):\n #Read in a time slice from mux file\n hz_p_array = p_array.array('f')\n hz_p_array.read(mux_file, points_num)\n hz_p = num.array(hz_p_array, dtype=float)\n hz_p = num.reshape(hz_p, (len(lon), len(lat)))\n hz_p = num.transpose(hz_p) # mux has lat varying fastest, nc has long v.f.\n\n #write time slice to nc file\n nc_file.store_timestep(hz_p)\n\n mux_file.close()\n nc_file.close()\n\n return lonlatdep, lon, lat, depth", "def df_to_nc(df1,out_name):\n\txds = xr.Dataset.from_dataframe(df1)\n\txds.to_netcdf(str(out_name) + \".nc\")", "def ingest_cmorph_to_netcdf_full(work_dir,\r\n netcdf_file,\r\n raw=True):\r\n \r\n # create/initialize the NetCDF dataset, get back a data descriptor dictionary\r\n data_desc = _init_netcdf(netcdf_file, work_dir)\r\n\r\n with netCDF4.Dataset(netcdf_file, 'a') as output_dataset:\r\n \r\n # compute the time values \r\n total_years = 2017 - int(data_desc['start_date'].year) + 1 #FIXME replace this hard-coded value with an additional end_year entry in the data_desc\r\n output_dataset.variables['time'][:] = _compute_days(data_desc['start_date'].year,\r\n total_years * 12, \r\n initial_month=data_desc['start_date'].month,\r\n units_start_year=data_desc['units_since_year'])\r\n \r\n # get a handle to the precipitation variable, for convenience\r\n data_variable = output_dataset.variables['prcp']\r\n \r\n # loop over each year/month, reading binary data from CMORPH files and adding into the NetCDF variable\r\n for year in range(data_desc['start_date'].year, 2018): # from start year through 2017, replace the value 2018 here with some other method of determining this value from the dataset itself\r\n for month in range(1, 13):\r\n\r\n # get the files for the month\r\n downloaded_files = _download_daily_files(work_dir, year, month, raw)\r\n \r\n if len(downloaded_files) > 0:\r\n\r\n # read all the data for the month as a sum from the daily values, assign into the appropriate slice of the variable\r\n data = _read_daily_cmorph_to_monthly_sum(downloaded_files, data_desc, year, month)\r\n \r\n # assume values are in lat/lon orientation\r\n data = np.reshape(data, (1, data_desc['ydef_count'], data_desc['xdef_count']))\r\n \r\n # get the time index, which is actually the month's count from the start of the period of record \r\n time_index = ((year - data_desc['start_date'].year) * 12) + month - 1\r\n \r\n # assign into the appropriate slice for 
the monthly time step\r\n data_variable[time_index, :, :] = data\r\n \r\n # clean up\r\n for file in downloaded_files:\r\n os.remove(file)", "def read_ncswan(filename_or_fileglob, file_format=\"netcdf\", mapping=MAPPING, chunks={}):\n dset = open_netcdf_or_zarr(\n filename_or_fileglob=filename_or_fileglob,\n file_format=file_format,\n mapping=mapping,\n chunks=chunks,\n )\n return from_ncswan(dset)", "def prepare_netcdf(self, chosen_levels, campaign_name):\n now = dt.datetime.now()\n file_name = \"/\" + campaign_name + \"_\" + now.strftime(format=\"%Y_%m_%d_%H_%M\") + \".nc\"\n # 1.1 Root group:\n self.rootgrp = nc4.Dataset(self.dw_path + file_name, \"w\", format=\"NETCDF4\")\n # 1.2 Root group attributes\n self.rootgrp.description=\"FloX data downloaded from the Fluospecchio stack.\"\n self.rootgrp.campaign_name=campaign_name\n self.rootgrp.data_ownership=\"Remote Sensing of Water Systems (RSWS), Department of Geography, University of Zurich, Switzerland\"\n self.rootgrp.url=\"www.rsws.ch\"\n # 2. Create Sensor groups:\n # 2.1 FLUO\n self.rootgrp.createGroup(\"FLUO\")\n # 2.2 FULL\n self.rootgrp.createGroup(\"FULL\")\n # 2.3 Sensor attributes\n self.rootgrp[\"FLUO\"].sensor=\"Optic-Spec 1 (FLUO)\"\n self.rootgrp[\"FLUO\"].ssi=\"0.17 nm\"\n self.rootgrp[\"FLUO\"].fwhm=\"0.3 nm\"\n self.rootgrp[\"FLUO\"].fov=\"Dual FOV. Upwelling radiance 25°. Downwelling radiance 180°\"\n self.rootgrp[\"FLUO\"].snr=\"1000\"\n self.rootgrp[\"FULL\"].sensor=\"Optic-Spec 2 (FULL)\"\n self.rootgrp[\"FULL\"].ssi=\"0.65 nm\"\n self.rootgrp[\"FULL\"].fwhm=\"1.5 nm\"\n self.rootgrp[\"FULL\"].fov=\"Dual FOV. Upwelling radiance 25°. Downwelling radiance 180°\"\n\n for level in chosen_levels:\n # 3. Creating the level groups\n self.rootgrp[\"FLUO\"].createGroup(level)\n self.rootgrp[\"FULL\"].createGroup(level)\n # 4. Creating the data groups:\n # 4.1 FLUO\n self.rootgrp.createGroup(\"FLUO/\"+level+\"/Upwelling\")\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"].description = \"Upwelling (target reflected) radiation\"\n self.rootgrp.createGroup(\"FLUO/\"+level+\"/Downwelling\")\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"].description = \"Downwelling solar radiation\"\n # 4.2 FULL\n self.rootgrp.createGroup(\"FULL/\"+level+\"/Upwelling\")\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"].description = \"Upwelling (target reflected) radiation\"\n self.rootgrp.createGroup(\"FULL/\"+level+\"/Downwelling\")\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"].description = \"Downwelling solar radiation\"\n\n # 2. Creating the dimensions (wavelength, time)\n # 2.1 FLUO\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"].createDimension(\"wavelength\", 1024) # get correct length from data\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"].createDimension(\"time\", None) # we don't know the correct length at runtime\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"].createDimension(\"wavelength\", 1024) \n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"].createDimension(\"time\", None)\n # 2.2 FULL\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"].createDimension(\"wavelength\", 1024) \n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"].createDimension(\"time\", None) \n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"].createDimension(\"wavelength\", 1024) \n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"].createDimension(\"time\", None) \n\n # 3. 
Create variables for coordinates\n # 3.1 FLUO\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"].createVariable(\"wavelength\",\"f8\",(\"wavelength\",)) # f8 = 64-bit floating point\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"][\"wavelength\"].units = \"nm\"\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"][\"wavelength\"].long_name = \"Wavelength\"\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"].createVariable(\"time\", \"f8\", (\"time\",))\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"][\"time\"].units = \"seconds since 1970-01-01 00:00:00\"\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"][\"time\"].calendar = \"standard\"\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"][\"time\"].long_name = \"Time\"\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"].createVariable(\"wavelength\",\"f8\",(\"wavelength\",))\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"][\"wavelength\"].units = \"nm\"\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"][\"wavelength\"].long_name = \"Wavelength\"\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"].createVariable(\"time\", \"f8\", (\"time\",))\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"][\"time\"].units = \"seconds since 1970-01-01 00:00:00\"\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"][\"time\"].calendar = \"standard\"\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"][\"time\"].long_name = \"Time\"\n # 3.2 FULL\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"].createVariable(\"wavelength\",\"f8\",(\"wavelength\",)) \n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"][\"wavelength\"].units = \"nm\"\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"][\"wavelength\"].long_name = \"Wavelength\"\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"].createVariable(\"time\", \"f8\", (\"time\",))\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"][\"time\"].units = \"seconds since 1970-01-01 00:00:00\"\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"][\"time\"].calendar = \"standard\"\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"][\"time\"].long_name = \"Time\"\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"].createVariable(\"wavelength\",\"f8\",(\"wavelength\",))\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"][\"wavelength\"].units = \"nm\"\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"][\"wavelength\"].long_name = \"Wavelength\"\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"].createVariable(\"time\", \"f8\", (\"time\",))\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"][\"time\"].units = \"seconds since 1970-01-01 00:00:00\"\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"][\"time\"].calendar = \"standard\"\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"][\"time\"].long_name = \"Time\"\n\n # 4. 
Create variables for the measurements\n # 4.1 define data type and unit type\n if level == \"DN\":\n utype = \"DN\"\n name = \"Digital Numbers\"\n elif level == \"Radiance\" or \"SpecFit\":\n utype = \"W/m2/nm/sr\"\n name = \"Radiance\"\n else:\n utype = \"a.u.\"\n name = \"Reflectance\"\n # 4.2 FLUO\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"].createVariable(\"downwelling\", \"f8\", (\"wavelength\", \"time\",)) \n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"][\"downwelling\"].units = utype\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"][\"downwelling\"].long_name = name\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"].createVariable(\"upwelling\", \"f8\", (\"wavelength\", \"time\",)) \n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"][\"upwelling\"].units = utype\n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"][\"upwelling\"].long_name = name\n # 4.3 FULL\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"].createVariable(\"downwelling\", \"f8\", (\"wavelength\", \"time\",)) \n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"][\"downwelling\"].units = utype\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"][\"downwelling\"].units = name\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"].createVariable(\"upwelling\", \"f8\", (\"wavelength\", \"time\",)) \n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"][\"upwelling\"].units = utype\n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"][\"upwelling\"].long_name = name\n\n # 5. Create variables for the file name, they align with the time dimension - currently not working\n # 5.1 FLUO\n # self.rootgrp[level+\"/FLUO/Downwelling\"].createVariable(\"file_name\", \"vlen\", (\"time\",)) \n # self.rootgrp[level+\"/FLUO/Upwelling\"].createVariable(\"file_name\", \"vlen\", (\"time\",)) \n # # 5.2 FULL\n # self.rootgrp[level+\"/FULL/Downwelling\"].createVariable(\"file_name\", \"vlen\", (\"time\",)) \n # self.rootgrp[level+\"/FULL/Upwelling\"].createVariable(\"file_name\", \"vlen\", (\"time\",)) \n\n # 6. Create variables for other metadata elements, they also align with the time dimension\n # we could save on disk space by using u4 datatype, but currently no time to think about the consequences\n for mp in self.chosen_meta:\n if mp in self.level_meta.get(level):\n # 6.1 FLUO\n self.rootgrp[\"FLUO/\"+level+\"/Downwelling\"].createVariable(mp, \"f8\", (\"time\",)) \n self.rootgrp[\"FLUO/\"+level+\"/Upwelling\"].createVariable(mp, \"f8\", (\"time\",)) \n # 6.2 FULL\n self.rootgrp[\"FULL/\"+level+\"/Downwelling\"].createVariable(mp, \"f8\", (\"time\",)) \n self.rootgrp[\"FULL/\"+level+\"/Upwelling\"].createVariable(mp, \"f8\", (\"time\",)) \n\n self.log_writer.writeLog(\"INFO\", \"NetCDF4 file created.\")", "def write_nc(topography, filename, format='NETCDF3_64BIT_DATA'):\n from netCDF4 import Dataset\n if not topography.is_domain_decomposed and topography.communicator.rank > 1:\n return\n with Dataset(filename, 'w', format=format, parallel=topography.is_domain_decomposed,\n comm=topography.communicator) as nc:\n nx, ny = topography.nb_grid_pts\n sx, sy = topography.physical_sizes\n\n nc.createDimension('x', nx)\n nc.createDimension('y', ny)\n\n x_var = nc.createVariable('x', 'f8', ('x',))\n y_var = nc.createVariable('y', 'f8', ('y',))\n heights_var = nc.createVariable('heights', 'f8', ('x', 'y',))\n\n x_var.length = sx\n x_var.periodic = 1 if topography.is_periodic else 0\n if 'unit' in topography.info:\n x_var.length_unit = topography.info['unit']\n x_var[...] 
= (np.arange(nx) + 0.5) * sx / nx\n y_var.length = sy\n y_var.periodic = 1 if topography.is_periodic else 0\n if 'unit' in topography.info:\n y_var.length_unit = topography.info['unit']\n y_var[...] = (np.arange(ny) + 0.5) * sy / ny\n\n if topography.is_domain_decomposed:\n heights_var.set_collective(True)\n heights_var[topography.subdomain_slices] = topography.heights()", "def xr_save_by_grouping_along_dimension(ds,filepath, filename_prefix, grouping='chunk',parallel=True,dim='time'):\n \n return", "def _init_netcdf(netcdf_file,\r\n work_dir):\r\n \r\n # read data description info\r\n data_desc = _read_description(work_dir)\r\n \r\n # get the years covered\r\n years = _get_years()\r\n \r\n # create a corresponding NetCDF\r\n with netCDF4.Dataset(netcdf_file, 'w') as output_dataset:\r\n \r\n # create the time, x, and y dimensions\r\n output_dataset.createDimension('time', None)\r\n output_dataset.createDimension('lon', data_desc['xdef_count'])\r\n output_dataset.createDimension('lat', data_desc['ydef_count'])\r\n \r\n #TODO provide additional attributes for CF compliance, data discoverability, etc.\r\n output_dataset.title = data_desc['title']\r\n \r\n # create the coordinate variables\r\n time_variable = output_dataset.createVariable('time', 'i4', ('time',))\r\n x_variable = output_dataset.createVariable('lon', 'f4', ('lon',))\r\n y_variable = output_dataset.createVariable('lat', 'f4', ('lat',))\r\n \r\n # set the coordinate variables' attributes\r\n data_desc['units_since_year'] = 1800\r\n time_variable.units = 'days since %s-01-01 00:00:00' % data_desc['units_since_year']\r\n x_variable.units = 'degrees_east'\r\n y_variable.units = 'degrees_north'\r\n \r\n # generate longitude and latitude values, assign these to the NetCDF coordinate variables\r\n lon_values = list(_frange(data_desc['xdef_start'], data_desc['xdef_start'] + (data_desc['xdef_count'] * data_desc['xdef_increment']), data_desc['xdef_increment']))\r\n lat_values = list(_frange(data_desc['ydef_start'], data_desc['ydef_start'] + (data_desc['ydef_count'] * data_desc['ydef_increment']), data_desc['ydef_increment']))\r\n x_variable[:] = np.array(lon_values, 'f4')\r\n y_variable[:] = np.array(lat_values, 'f4')\r\n \r\n # read the variable data from the CMORPH file, mask and reshape accordingly, and then assign into the variable\r\n data_variable = output_dataset.createVariable('prcp', \r\n 'f8', \r\n ('time', 'lat', 'lon',), \r\n fill_value=np.NaN)\r\n\r\n # variable attributes\r\n data_variable.units = 'mm'\r\n data_variable.standard_name = 'precipitation'\r\n data_variable.long_name = 'precipitation, monthly cumulative'\r\n data_variable.description = data_desc['title']\r\n\r\n return data_desc", "def nc_write(pet, lat, lon, filename):\r\n\r\n ds = Dataset(filename, mode='w', format='NETCDF4_CLASSIC')\r\n\r\n time = ds.createDimension('time', None)\r\n latitude = ds.createDimension('latitude', len(lat))\r\n longitude = ds.createDimension('longitude', len(lon))\r\n \r\n time = ds.createVariable('time', np.float32, ('time',))\r\n latitude = ds.createVariable('latitude', np.float32, ('latitude',)) #, fill_value=-32767\r\n longitude = ds.createVariable('longitude', np.float32, ('longitude',))\r\n pet_val = ds.createVariable('pet', 'f4', ('time','latitude','longitude'),zlib=True)\r\n \r\n # units\r\n time.units='hours since 1981-01-01 00:00:00.0'\r\n time.calendar='proleptic_gregorian'\r\n latitude.units='degree_north'\r\n longitude.units='degree_east'\r\n pet_val.units='mm' \r\n # values\r\n time[:] = np.arange(pet.shape[0])\r\n 
latitude[:] = lat\r\n longitude [:] = lon\r\n pet_val[:,:,:] = pet\r\n\r\n ds.close()\r\n \r\n return None", "def write_ctd(ds: xr.Dataset) -> bytes:\n # When libcchdo was first written, netCDF for python didn't support in memory data\n nc_file = Dataset(\"inmemory.nc\", \"w\", format=\"NETCDF3_CLASSIC\", memory=0)\n\n define_dimensions(nc_file, ds.dims[\"N_LEVELS\"])\n\n # Define dataset attributes\n define_attributes(\n nc_file,\n ds.expocode, # self.globals.get('EXPOCODE', UNKNOWN),\n ds.get(\"section_id\", UNKNOWN), # self.globals.get('SECT_ID', UNKNOWN),\n \"WOCE CTD\",\n ds.station, # self.globals.get('STNNBR', UNKNOWN),\n ds[\"cast\"].astype(str), # self.globals.get('CASTNO', UNKNOWN),\n int(\n ds.get(\"btm_depth\", FILL_VALUE)\n ), # int(self.globals.get('DEPTH', FILL_VALUE)),\n )\n\n set_original_header(nc_file, ds)\n nc_file.WOCE_CTD_FLAG_DESCRIPTION = woce.CTD_FLAG_DESCRIPTION\n\n create_and_fill_data_variables(nc_file, ds)\n\n try:\n nobs_data = ds[\"ctd_number_of_observations\"].to_numpy()\n var_number = nc_file.createVariable(\"number_observations\", \"i4\", (\"pressure\",))\n var_number.long_name = \"number_observations\"\n var_number.units = \"integer\"\n var_number.data_min = np.min(nobs_data)\n var_number.data_max = np.max(nobs_data)\n var_number.C_format = \"%1d\"\n var_number[:] = nobs_data\n except KeyError:\n pass\n\n _create_common_variables(nc_file, ds)\n\n return bytes(nc_file.close())", "def create_sph_ncfile(filename,attribs,n,dim):\n nc_file = netCDF4.Dataset(filename,'w')\n\n # Miscellaneous attributes\n setattr(nc_file,'Date',1)\n setattr(nc_file,'Creator','ac')\n\n # sphvars file attributes\n for name,val in attribs.iteritems():\n setattr(nc_file,name,val)\n \n # Create netcdf dimensions\n # number of particles\n # spatial dimensions\n # timestep number\n nc_file.createDimension('timestep',None)\n nc_file.createDimension('particle',n)\n nc_file.createDimension('spatial',dim)\n\n # Create variables for the dimensions, and populate them\n tstep = nc_file.createVariable('timestep','d',('timestep',))\n part = nc_file.createVariable('particle','i',('particle',))\n space = nc_file.createVariable('spatial','i',('spatial',))\n \n part[:] = numpy.array(range(n))\n space[:] = numpy.array([0,1,2])\n\n dimnames = nc_file.dimensions.keys()\n\n # Set up variables\n # every particle property has a variable\n # and there are also variables for the box size\n # and the box dimensions.\n # a variable for 'time elapsed' at each step (for variable stepping)\n\n #each variable needs a \"units\" attribute\n\n #vector variables\n v_dims =('timestep','particle','spatial')\n\n #scalar variables\n sc_dims = ('timestep','particle')\n \n #histogram variables\n hist_dims = ('timestep')\n\n #total and average variables\n tot_dims = ('timestep')\n\n r = nc_file.createVariable('position','d',v_dims)\n v = nc_file.createVariable('velocity','d',v_dims)\n #a = nc_file.createVariable('acceleration','d',v_dims)\n #temp = nc_file.createVariable('temperature','d',sc_dims)\n energy = nc_file.createVariable('internal_energy','d',sc_dims)\n mass = nc_file.createVariable('mass','d',sc_dims)\n #rho = nc_file.createVariable('density','d',sc_dims)\n #press = nc_file.createVariable('pressure','d',sc_dims)\n #ss =nc_file.createVariable('sound_speed','d',sc_dims)\n #visc =nc_file.createVariable('viscosity','d',sc_dims)\n #h = nc_file.createVariable('smoothing_length','d',sc_dims)\n #hl = nc_file.createVariable('long_smoothing_length','d',sc_dims)\n #q = nc_file.createVariable('heat_flux','d',v_dims)\n #vsm= 
nc_file.createVariable('smoothed_velocity','d',v_dims)\n #psm =nc_file.createVariable('smoothed_pressure','d',sc_dims)\n #tmpsm =nc_file.createVariable('smoothed_temperature','d',sc_dims)\n #grad_rho = nc_file.createVariable('density_gradient','d',v_dims)\n #ptype = nc_file.createVariable('particle_type','u1',sc_dims)\n\n #now set up the non-particle averaged or total system variables\n # kinetic energy, internal energy, isolated Hamiltonian\n\n #V = nc_file.createVariable('total_kinetic_energy','d',tot_dims) \n #T = nc_file.createVariable('total_internal_energy','d',tot_dims)\n #tav = nc_file.createVariable('average_temp','d',tot_dims)\n #rhoav = nc_file.createVariable('rho_average','d',tot_dims)\n #tstat_energy = nc_file.createVariable('thermostat_energy','d',tot_dims)\n #TV = nc_file.createVariable('hamiltonian','d',tot_dims)\n #dti = nc_file.createVariable('dt','d',tot_dims)\n #sys_dt = nc_file.createVariable('systime','d',tot_dims)\n \n nc_file.sync()\n nc_file.close()", "def prepare_study(netcdf_files,\n cell_list=None,\n region_select_dict=None, # dict if multiple da\n dataarray_select=None, # list if multiple da\n mc_type=None,\n count_type=None,\n save_path=None,\n compression=False,\n compression_level=1,\n iter_chunksize=None):\n # netcdf_files\n if isinstance(netcdf_files, str):\n netcdf_files = sorted(glob.glob(netcdf_files))\n else:\n if not isinstance(netcdf_files, list):\n raise TypeError(f'netcdf_files should be either str or list, '\n f'provided {type(netcdf_files)}')\n\n if len(netcdf_files) == 0:\n print('No valid path provided.')\n return None\n for file_path in netcdf_files:\n if not pathlib.Path(file_path).exists():\n raise ValueError(f'{file_path} do not exist.')\n\n if cell_list is None:\n # no cell list, select all cells\n cell_list = []\n for file_path in netcdf_files:\n ds_index = _get_ds_indexes(file_path)\n cell_list += ds_index['cell'].tolist()\n cell_list = list(set(cell_list))\n\n if isinstance(dataarray_select, str):\n dataarray_select = [dataarray_select]\n if isinstance(mc_type, str):\n mc_type = [mc_type]\n if isinstance(count_type, str):\n count_type = [count_type]\n\n if iter_chunksize is None:\n combined_data = _process_cell_list(netcdf_files,\n cell_list,\n region_select_dict,\n dataarray_select,\n mc_type,\n count_type)\n if combined_data is None:\n return None\n if save_path is not None:\n encoding_dict = {}\n for da in combined_data.data_vars:\n encoding_dict[da] = {'zlib': compression,\n 'complevel': compression_level}\n combined_data.to_netcdf(save_path, encoding=encoding_dict)\n return None\n else:\n return MCDS(combined_data)\n else:\n # generator mode\n return _prepare_study_generator(\n netcdf_files=netcdf_files,\n cell_list=cell_list,\n region_select_dict=region_select_dict, # dict if multiple da\n dataarray_select=dataarray_select, # list if multiple da\n mc_type=mc_type,\n count_type=count_type,\n iter_chunksize=iter_chunksize\n )", "def open_nc(self):\n \n if not self._out_nc:\n self._logger.error('The NetCDF file has not been initialized')\n return\n \n if self._nc and self._nc.isopen():\n raise GliderNetCDFWriterException('netCDF4.Dataset is already open: {:s}'.format(self._nc))\n \n # Open the NetCDF in append mode\n self._nc = Dataset(self._out_nc, mode='a')\n \n # Starting index of record dimension\n self._stream_index = self._get_record_dim_len()", "def create_CF_NetCDF(arcpy, in_raster, rootgrp, map_pro, projdir, DXDY_dict, sr, GeoTransformStr, addLatLon=False, notes='', loglines=[], addVars=[]):\n\n tic1 = time.time()\n 
loglines.append('Creating CF-netCDF File.')\n arcpy.AddMessage(loglines[-1])\n\n # Local variables\n copydims = []\n copyvars = []\n copyatts = []\n ## copyatts = ['CEN_LAT','CEN_LON', 'TRUELAT1', 'TRUELAT2', 'MOAD_CEN_LAT', 'STAND_LON',\n ## 'POLE_LAT', 'POLE_LON', 'corner_lats', 'corner_lons', 'MAP_PROJ'] # only keep these global attributes from GEOGRID file\n\n # For copying dimensions, variables, attributes from input GEOGRID file to output spatial metadata file\n ## # Use only necessary GEOGRID dimensions, variables, and attributes\n ## if format_out == 'LDASOUT':\n ##\n ## # Copy certain dimensions from the GEOGRID\n ## copydims = ['Time', 'west_east', 'south_north']\n ## copyvars = ['XLAT_M', 'XLONG_M']\n\n ## # Copy dimensions if necessary\n ## for dname, dimension in rootgrp2.dimensions.iteritems():\n ## if dname in copydims:\n ## rootgrp.createDimension(dname, len(dimension) if not dimension.isunlimited() else None)\n ##\n ## # Copy certain spatial variables and any coordinate variables\n ## for v_name, variable in rootgrp2.variables.iteritems():\n ## if v_name in copyvars or v_name in copydims:\n ## var_vals = variable[:] # Read variable values\n ## varatts = {k: variable.getncattr(k) for k in variable.ncattrs()} # Read variable attributes\n ## var_dtype = variable.datatype # Read variable data type\n ## var_dims = variable.dimensions # Read variable dimensions\n ## outVar = rootgrp.createVariable(v_name, var_dtype, var_dims) # Create variable\n ## outVar.setncatts(varatts) # Set variable attributes\n ## outVar[:] = var_vals # Set variable values\n ##\n ## # Copy global attributes (remove strings as this may be causing a problem)\n ## inglobalatts = rootgrp2.__dict__ # Global attributes from GEOGRID file\n ## outglobalatts = {key:value for key,value in inglobalatts.iteritems() if key in copyatts}\n ## outglobalatts.update(DXDY_dict) # Add in the new DX/DY values\n ## rootgrp.setncatts(outglobalatts)\n ## rootgrp2.close() # Close the GEOGRID file\n\n # Gather projection information from input raster projection\n descData = arcpy.Describe(in_raster)\n dim1size = descData.width\n dim2size = descData.height\n sr = descData.SpatialReference\n srType = sr.type\n PE_string = sr.exportToString()\n PE_string = PE_string.replace(\"'\", '\"') # Replace ' with \" so Esri can read the PE String properly when running NetCDFtoRaster\n loglines.append(' Esri PE String: %s' %PE_string)\n arcpy.AddMessage(loglines[-1])\n\n # Find name for the grid mapping\n if CF_projdict.get(map_pro) is not None:\n grid_mapping = CF_projdict[map_pro]\n else:\n grid_mapping = sr.name\n loglines.append(' Map Projection of input raster (not a WRF projection): %s' %grid_mapping)\n arcpy.AddMessage(loglines[-1])\n\n # Must handle difference between ProjectionCoordinateSystem and LatLonCoordinateSystem\n if srType == 'Geographic':\n CoordSysVarName = \"LatLonCoordinateSystem\"\n elif srType == 'Projected':\n CoordSysVarName = \"ProjectionCoordinateSystem\"\n #proj_units = sr.linearUnitName.lower() # sr.projectionName wouldn't work for a GEOGCS\n proj_units = 'm' # Change made 11/3/2016 by request of NWC\n\n # Create Dimensions\n dim_y = rootgrp.createDimension('y', dim2size)\n dim_x = rootgrp.createDimension('x', dim1size)\n print ' Dimensions created after {0: 8.2f} seconds.'.format(time.time()-tic1)\n\n # Create coordinate variables\n var_y = rootgrp.createVariable('y', 'f8', 'y') # (64-bit floating point)\n var_x = rootgrp.createVariable('x', 'f8', 'x') # (64-bit floating point)\n if srType == 'Geographic':\n 
#var_y.standard_name = ''\n #var_x.standard_name = ''\n var_y.long_name = \"latitude coordinate\"\n var_x.long_name = \"longitude coordinate\"\n var_y.units = \"degrees_north\"\n var_x.units = \"degrees_east\"\n var_y._CoordinateAxisType = \"Lat\"\n var_x._CoordinateAxisType = \"Lon\"\n elif srType == 'Projected':\n # Set variable attributes\n var_y.standard_name = 'projection_y_coordinate'\n var_x.standard_name = 'projection_x_coordinate'\n var_y.long_name = 'y coordinate of projection'\n var_x.long_name = 'x coordinate of projection'\n var_y.units = proj_units # was 'meter', now 'm'\n var_x.units = proj_units # was 'meter', now 'm'\n var_y._CoordinateAxisType = \"GeoY\" # Use GeoX and GeoY for projected coordinate systems only\n var_x._CoordinateAxisType = \"GeoX\" # Use GeoX and GeoY for projected coordinate systems only\n var_y.resolution = float(DXDY_dict['DY']) # Added 11/3/2016 by request of NWC\n var_x.resolution = float(DXDY_dict['DX']) # Added 11/3/2016 by request of NWC\n\n # Scalar projection variable - http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/reference/StandardCoordinateTransforms.html\n proj_var = rootgrp.createVariable(CoordSysVarName, 'S1') # (Scalar Char variable)\n proj_var._CoordinateAxes = 'y x' # Coordinate systems variables always have a _CoordinateAxes attribute\n proj_var._CoordinateTransformType = \"Projection\"\n proj_var.transform_name = grid_mapping # grid_mapping\n proj_var.grid_mapping_name = grid_mapping # for CF compatibility\n proj_var._CoordinateAxes = 'y x' # Optional for dealing with implicit coordinate systems\n proj_var.esri_pe_string = PE_string # For ArcGIS\n proj_var.spatial_ref = PE_string # For GDAl\n proj_var.GeoTransform = GeoTransformStr # For GDAl - GeoTransform array\n\n # Projection specific parameters - http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/reference/StandardCoordinateTransforms.html\n if map_pro == 1:\n # Lambert Conformal Conic\n\n # Required transform variables\n proj_var.standard_parallel = sr.standardParallel1, sr.standardParallel2 # Double\n proj_var.longitude_of_central_meridian = float(sr.centralMeridianInDegrees) # Double\n proj_var.latitude_of_projection_origin = float(sr.latitudeOfOrigin) # Double\n\n # Optional tansform variable attributes\n proj_var.false_easting = float(sr.falseEasting) # Double Always in the units of the x and y projection coordinates\n proj_var.false_northing = float(sr.falseNorthing) # Double Always in the units of the x and y projection coordinates\n proj_var.earth_radius = 6370000.0 # OPTIONAL. Parameter not read by Esri. 
Default CF sphere: 6371.229 km\n #proj_var.semi_major_axis = 6370000.0 # Double - optional Lambert Conformal Conic parameter\n #proj_var.semi_minor_axis = 6370000.0 # Double - optional Lambert Conformal Conic parameter\n #proj_var.inverse_flattening = 0.0 # Double - optional Lambert Conformal Conic parameter\n\n elif map_pro == 2:\n # Polar Stereographic\n\n # Required transform variables\n proj_var.longitude_of_projection_origin = float(sr.longitudeOfOrigin) # Double - proj_var.straight_vertical_longitude_from_pole = ''\n proj_var.latitude_of_projection_origin = float(sr.latitudeOfOrigin) # Double\n proj_var.scale_factor_at_projection_origin = float(sr.scaleFactor) # Double\n\n # Optional tansform variable attributes\n proj_var.false_easting = float(sr.falseEasting) # Double Always in the units of the x and y projection coordinates\n proj_var.false_northing = float(sr.falseNorthing) # Double Always in the units of the x and y projection coordinates\n #proj_var.semi_major_axis = 6370000.0 # Double - optional Lambert Conformal Conic parameter\n #proj_var.semi_minor_axis = 6370000.0 # Double - optional Lambert Conformal Conic parameter\n #proj_var.inverse_flattening = 0.0 # Double - optional Lambert Conformal Conic parameter\n\n elif map_pro == 3:\n # Mercator\n\n # Required transform variables\n proj_var.longitude_of_projection_origin = float(sr.longitudeOfOrigin) # Double\n proj_var.latitude_of_projection_origin = float(sr.latitudeOfOrigin) # Double\n proj_var.standard_parallel = float(sr.standardParallel1) # Double\n\n elif map_pro == 6:\n # Cylindrical Equidistant or rotated pole\n\n #http://cfconventions.org/Data/cf-conventions/cf-conventions-1.6/build/cf-conventions.html#appendix-grid-mappings\n # Required transform variables\n #proj_var.grid_mapping_name = \"latitude_longitude\" # or \"rotated_latitude_longitude\"\n\n #loglines.append(' Cylindrical Equidistant projection not supported.')\n #arcpy.AddMessage(loglines[-1])\n #raise SystemExit\n pass # No extra parameters needed for latitude_longitude\n\n # For prefilling additional variables and attributes on the same 2D grid, given as a list [[<varname>, <vardtype>, <long_name>],]\n for varinfo in addVars:\n ncvar = rootgrp.createVariable(varinfo[0], varinfo[1], ('y', 'x'))\n ncvar.esri_pe_string = PE_string\n ncvar.grid_mapping = CoordSysVarName\n #ncvar.spatial_ref = PE_string # For GDAl\n #ncvar.GeoTransform = GeoTransformStr # For GDAl - GeoTransform array\n #ncvar.long_name = varinfo[2]\n #ncvar.units = varinfo[3]\n\n # Get x and y variables for the netCDF file\n xmap, ymap, loglines2 = getxy(in_raster, projdir, [])\n loglines += loglines2\n ymaparr = arcpy.RasterToNumPyArray(ymap)\n xmaparr = arcpy.RasterToNumPyArray(xmap)\n\n # Assumes even spacing in y across domain\n var_y[:] = ymaparr[:,0]\n var_x[:] = xmaparr[0,:]\n arcpy.Delete_management(xmap)\n arcpy.Delete_management(ymap)\n del xmap, ymap, loglines2\n\n loglines.append(' Coordinate variables and variable attributes set after {0: 8.2f} seconds.'.format(time.time()-tic1))\n arcpy.AddMessage(loglines[-1])\n\n if addLatLon == True:\n\n loglines.append(' Proceeding to add LATITUDE and LONGITUDE variables after {0: 8.2f} seconds.'.format(time.time()-tic1))\n arcpy.AddMessage(loglines[-1])\n\n # Populate this file with 2D latitude and longitude variables\n # Latitude and Longitude variables (WRF)\n lat_WRF = rootgrp.createVariable('LATITUDE', 'f4', ('y', 'x')) # (32-bit floating point)\n lon_WRF = rootgrp.createVariable('LONGITUDE', 'f4', ('y', 'x')) # (32-bit floating 
point)\n lat_WRF.long_name = 'latitude coordinate' # 'LATITUDE on the WRF Sphere'\n lon_WRF.long_name = 'longitude coordinate' # 'LONGITUDE on the WRF Sphere'\n lat_WRF.units = \"degrees_north\"\n lon_WRF.units = \"degrees_east\"\n lat_WRF._CoordinateAxisType = \"Lat\"\n lon_WRF._CoordinateAxisType = \"Lon\"\n lat_WRF.grid_mapping = CoordSysVarName # This attribute appears to be important to Esri\n lon_WRF.grid_mapping = CoordSysVarName # This attribute appears to be important to Esri\n lat_WRF.esri_pe_string = PE_string\n lon_WRF.esri_pe_string = PE_string\n #lat_WRF.spatial_ref = PE_string # For GDAl\n #lon_WRF.spatial_ref = PE_string # For GDAl\n #lat_WRF.GeoTransform = GeoTransformStr # For GDAl - GeoTransform array\n #lon_WRF.GeoTransform = GeoTransformStr # For GDAl - GeoTransform array\n\n # Missing value attribute not needed yet\n #missing_val = numpy.finfo(numpy.float32).min # Define missing data variable based on numpy\n #lat_WRF.missing_value = missing_val # Float sys.float_info.min?\n #lon_WRF.missing_value = missing_val # Float sys.float_info.min?\n\n '''Adding the Esri PE String in addition to the CF grid mapping attributes\n is very useful. Esri will prefer the PE string over other CF attributes,\n allowing a spherical datum to be defined. Esri can interpret the coordinate\n system variable alone, but will assume the datum is WGS84. This cannot be\n changed except when using an Esri PE String.'''\n\n ## # Create a new coordinate system variable\n ## LatLonCoordSysVarName = \"LatLonCoordinateSystem\"\n ## latlon_var = rootgrp.createVariable(LatLonCoordSysVarName, 'S1') # (Scalar Char variable)\n ## latlon_var._CoordinateAxes = 'LATITUDE LONGITUDE' # Coordinate systems variables always have a _CoordinateAxes attribute\n\n # Data variables need _CoodinateSystems attribute\n lat_WRF._CoordinateAxisType = \"Lat\"\n lon_WRF._CoordinateAxisType = \"Lon\"\n lat_WRF._CoordinateSystems = CoordSysVarName\n lon_WRF._CoordinateSystems = CoordSysVarName\n ## lat_WRF._CoordinateSystems = \"%s %s\" %(CoordSysVarName, LatLonCoordSysVarName) # For specifying more than one coordinate system\n ## lon_WRF._CoordinateSystems = \"%s %s\" %(CoordSysVarName, LatLonCoordSysVarName) # For specifying more than one coordinate system\n\n # Create latitude and longitude rasters\n try:\n # Try to trap any errors in this try statement\n\n # Get lat and lon grids on WRF Sphere\n wkid = 104128 # Using EMEP Sphere (6370000m)\n loglines2, xout, yout, xmap, ymap = create_lat_lon_rasters(arcpy, projdir, in_raster, wkid)\n loglines += loglines2\n arcpy.Delete_management(xmap)\n arcpy.Delete_management(ymap)\n del xmap, ymap, loglines2\n\n # Populate netCDF variables using converted numpy arrays\n youtarr = arcpy.RasterToNumPyArray(yout)\n xoutarr = arcpy.RasterToNumPyArray(xout)\n loglines.append(' youtarr.shape : %s, %s' %(youtarr.shape[0], youtarr.shape[1]))\n arcpy.AddMessage(loglines[-1])\n loglines.append(' xoutarr.shape : %s, %s' %(xoutarr.shape[0], xoutarr.shape[1]))\n arcpy.AddMessage(loglines[-1])\n loglines.append(' lat_WRF.shape : %s, %s' %(lat_WRF.shape[0], lat_WRF.shape[1]))\n arcpy.AddMessage(loglines[-1])\n loglines.append(' lon_WRF.shape : %s, %s' %(lon_WRF.shape[0], lon_WRF.shape[1]))\n arcpy.AddMessage(loglines[-1])\n lat_WRF[:] = youtarr\n lon_WRF[:] = xoutarr\n del xout, yout, youtarr, xoutarr, in_raster\n\n loglines.append(' Variables populated after {0: 8.2f} seconds.'.format(time.time()-tic1))\n arcpy.AddMessage(loglines[-1])\n loglines.append(' Process completed without error.')\n 
arcpy.AddMessage(loglines[-1])\n\n loglines.append(' LATITUDE and LONGITUDE variables and variable attributes set after {0: 8.2f} seconds.'.format(time.time()-tic1))\n arcpy.AddMessage(loglines[-1])\n\n except Exception as e:\n loglines.append(' Process did not complete. Error: %s' %e)\n arcpy.AddMessage(loglines[-1])\n\n # Global attributes\n rootgrp.GDAL_DataType = 'Generic'\n rootgrp.Conventions = 'CF-1.5' # Maybe 1.0 is enough?\n rootgrp.Source_Software = 'WRF-Hydro GIS Pre-processor'\n rootgrp.history = 'Created %s' %time.ctime()\n rootgrp.processing_notes = notes\n loglines.append(' netCDF global attributes set after {0: 8.2f} seconds.'.format(time.time()-tic1))\n arcpy.AddMessage(loglines[-1])\n\n # Return the netCDF file to the calling script\n return rootgrp, grid_mapping, loglines" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method sets the prior for the object. The prior can later draw proposal values of theta through its sample method. (A standalone sketch of such a prior follows this row.)
def set_prior(self):
[ "def initPriorHypo(self):\n self.priorHypo = np.ones(model.nhypo)/model.nhypo", "def prior(self) -> tfp.distributions.Distribution:\n pass", "def set_default_prior_parameters(self):\n \n # Normal prior (default)\n if self.prior_type == \"normal\":\n self.prior_parameters = {\"mean\": to_row(0.00), \"cov\": np.diag([1.00])}", "def priorLikelihood(self, step):", "def init_params_random(self) -> None:\n self.probs = Dirichlet(self.prior).sample()", "def set_prior(self, **kwargs):\n self._prior_settings.update(kwargs)", "def set_prior_func(self, prior):\n if prior is None:\n self.logger.debug(\"Setting prior to None\")\n self.pr = prior\n else:\n self.logger.debug(\"Setting up prior function\")\n if isinstance(prior, dict):\n if not hasattr(self, \"pr\") or self.pr is None:\n self.pr = _FlatPriorFunctionWrapper(prior)\n else:\n self.pr.extents = prior\n else:\n self.pr = prior\n self.logger.debug(\"done\")", "def noisePrior(self, prior=1.0):\n self.options.noise=prior\n self.initialized = False\n\n # for chaining\n return self", "def noisePrior(self):\n self.options.useAutoNoise = True\n self.initialized = False\n\n # for chaining\n return self", "def setPrior(self, mean = None, covar = None, weight = None, scale = 1.0):\n # Handle mean/covar being None...\n if mean==None or covar==None:\n inc = gcp.GaussianInc(self.dims)\n dm = self.getDM()\n for i in xrange(dm.shape[0]): inc.add(dm[i,:])\n ggd = inc.fetch()\n if mean==None: mean = ggd.getMean()\n if covar==None: covar = ggd.getCovariance() * scale\n\n if numpy.linalg.det(covar)<1e-12: return False\n\n # Update the prior...\n self.prior.reset()\n self.prior.addPrior(mean, covar, weight)\n self.priorT = self.prior.intProb()\n\n return True", "def _get_default_parameters_prior(self):\n prior = {\n \"alpha_concentration0\": 2.0,\n \"beta_concentration0\": 0.1,\n }\n return prior", "def add_prior(self, **kwargs):\n\n for key in kwargs:\n if key in self.prior:\n prior = kwargs[key]\n self.prior[key] = prior\n else:\n raise ValueError(\"prior must be one of {}\".format(self.prior.keys()))", "def prior(cube, ndim, nparams):\n\n cube[0] = pri.GeneralPrior(cube[0], 'U', 0, 1) \n cube[1] = pri.GeneralPrior(cube[1], 'U', -numpy.pi/2.0, numpy.pi/2.0)\n cube[2] = pri.GeneralPrior(cube[2], 'U', -2000, 2000.0)", "def update_prior(self, **kws):\n if self.result is not None:\n # TODO: fail?\n logging.warning(\"updating prior of Fit with preexisting results!\")\n valid_keys = self.valid_model_options\n valid_keys_low = [k.lower() for k in valid_keys]\n for k, v in kws.items():\n if k.lower() in valid_keys_low:\n i = valid_keys_low.index(k.lower())\n self._prior_settings[valid_keys[i]] = v\n else:\n raise ValueError('{} is not a valid model argument.'\n 'Valid options are: {}'.format(k, valid_keys))\n # reset model cache\n self._pymc_model = None\n\n if self.prior is not None:\n logging.warning(\"updating prior of Fit with preexisting prior results!\")", "def initialize_priors(self):\n\n variable = np.array(self.defaults.variable)\n variable = self.defaults.variable\n if np.array(variable).dtype != object:\n variable = np.atleast_2d(variable)\n\n n = len(variable[0])\n\n if isinstance(self.mu_0, (int, float)):\n self.mu_prior = np.full((n, 1),self.mu_0)\n else:\n if len(self.mu_0) != n:\n raise FunctionError(\"Length of mu_0 ({}) does not match number of predictors ({})\".\n format(len(self.mu_0), n))\n self.mu_prior = np.array(self.mu_0).reshape(len(self._mu_0),1)\n\n if isinstance(self.sigma_0, (int, float)):\n Lambda_0 = (1 / (self.sigma_0 ** 2)) * 
np.eye(n)\n else:\n if len(self.sigma_0) != n:\n raise FunctionError(\"Length of sigma_0 ({}) does not match number of predictors ({})\".\n format(len(self.sigma_0), n))\n Lambda_0 = (1 / (np.array(self.sigma_0) ** 2)) * np.eye(n)\n self.Lambda_prior = Lambda_0\n\n # before we see any data, the posterior is the prior\n self.mu_n = self.mu_prior\n self.Lambda_n = self.Lambda_prior\n self.gamma_shape_n = self.gamma_shape_0\n self.gamma_size_n = self.gamma_size_0", "def __init__(self):\n self.prob_heads = random()", "def prior(cube, ndim, nparams):\n\n cube[0] = pri.GeneralPrior(cube[0], 'U', 0, 1) \n cube[1] = pri.GeneralPrior(cube[1], 'U', -numpy.pi/2.0, numpy.pi/2.0)\n cube[2] = pri.GeneralPrior(cube[2], 'U', -2000, 2000.0)\n cube[3] = pri.GeneralPrior(cube[3], 'U', 0, 400)", "def __init__(self, prior, transition):\n self.prior = prior\n self.transition = transition\n self.new_w_vec = np.array([])\n self.new_theta_vec = np.array([])\n self.new_num = 0", "def lnprior(p):\n return 0", "def setBackgroundPrior(self, bgprior) -> None:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
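A minimal sketch of what a prior exposing a sample-style method could look like, assuming SciPy is available. The class name, the attribute names (`n_dim`, `prior`), and the standard-normal choice are illustrative assumptions, not taken from this dataset row.

```python
import numpy as np
from scipy import stats

class Model:
    def __init__(self, n_dim):
        self.n_dim = n_dim

    def set_prior(self):
        # Frozen multivariate normal; proposal thetas are drawn via .rvs().
        self.prior = stats.multivariate_normal(mean=np.zeros(self.n_dim),
                                               cov=np.eye(self.n_dim))

    def sample_theta(self, n):
        # n proposal thetas from the prior, shape (n, n_dim).
        return self.prior.rvs(size=n)

model = Model(n_dim=3)
model.set_prior()
print(model.sample_theta(5).shape)  # (5, 3)
```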
This method generates replica data from a proposal theta drawn from the prior p(theta), and returns the replicated data. (A sketch of this step follows this row.)
def generate_replica(self):
[ "def create_petri_net_data_backup(self):\n self._prev_pn_data = self._pn_data.clone()", "def posterior(self): \n # create a grid over which we will calculate the likelihood\n self.p_grid = np.linspace(0, 1, num = self.g)\n # calculate the probability of observing the data\n self.likelihood = stats.binom.pmf(self.k,self.n,p = self.p_grid)\n # multiply with prior\n unst_posterior = self.prior * self.likelihood\n # standardize\n self.stand_posterior = unst_posterior / np.sum(unst_posterior)\n \n #sample from posterior\n np.random.seed(42)\n self.samples = np.random.choice(a=self.p_grid,size=self.i,replace=True,p=self.stand_posterior)\n\n #calculate posterior predictive distribution\n self.posterior_predictive_dist = stats.binom.rvs(n=self.n,p=self.samples,size=self.i)", "def clone_with_theta(self, theta):\n ...", "def propose(self):\n runenv.stepblockind=self.blockind\n pvalue=np.copy(self.proposevalue)\n for i in range(len(self.proposemethod)):\n if self.proposemethod[i] == \"pnormal\":\n while True:\n value=np.random.normal(self.stochastic.value[self.parindex[i]:self.parindex[i+1]], self.adaptive_scale_factor * self.proposal_sd[self.parindex[i]:self.parindex[i+1]])\n if np.all(value>0):\n break\n pvalue[self.parindex[i]:self.parindex[i+1]]=value\n #print value\n elif self.proposemethod[i] == \"splineparn\":\n while True:\n sv=self.stochastic.value[self.parindex[i]:self.parindex[i+1]]\n svr=np.copy(sv)\n svr[:-1]=sv[1:]-sv[:-1]\n value=np.random.multivariate_normal(svr[:-1],self.proposepars[i][-1]*(self.adaptive_scale_factor**2))\n if np.any(value<=0):\n continue\n parvalue=value.cumsum()+self.proposepars[i][0]\n if parvalue[-1]<self.proposepars[i][1]:\n break\n #print value\n #print parvalue\n pvalue[self.parindex[i]+1:self.parindex[i+1]]=parvalue\n pvalue[self.parindex[i]]=self.proposepars[i][0]\n self.stochastic.value=pvalue", "def _proposal(self, currval, params):\n\t\treturn self._sample_impl(params)", "def point_mutation(self, clone, mutation_rate):\r\n for i in range(0, len(clone.paratopes)):\r\n if random() < mutation_rate:\r\n clone.paratopes[i] = self.rand_paratope()\r\n return clone", "def generate_data_oscilatory(nTrials, N, T,freq_coinc, amp_coinc, offset_coinc,freq_bg, amp_bg,offset_bg,RateJitter = 10*pq.Hz):\n# from stocmod import poisson_nonstat as pn\n import neo\n h = 1*pq.ms\n # modulatory coincidence rate\n tc = numpy.arange(0,T.rescale('ms').magnitude,h.rescale('ms').magnitude)*pq.ms\n bbc = (2*numpy.pi*freq_coinc*tc).simplified\n coincrate = offset_coinc+ amp_coinc*numpy.sin(bbc)*offset_coinc.units\n coincrate[coincrate <0*coincrate.units]=0*coincrate.units\n\n # background rate\n tb = numpy.arange(0,T.rescale('ms').magnitude,h.rescale('ms').magnitude)*pq.ms\n bbb = (2*numpy.pi*freq_bg*tb).simplified\n backgroundrate = offset_bg+ amp_bg*numpy.sin(bbb)*offset_bg.units\n backgroundrate[backgroundrate <0*backgroundrate.units]=0*backgroundrate.units\n\n # inhomogenious rate across trials\n rndRateJitter = (numpy.random.rand(nTrials)-0.5)*RateJitter\n spiketrain = []\n for i in range(nTrials):\n rate_signal_bg = neo.AnalogSignal((backgroundrate.rescale('Hz')+rndRateJitter[i]).magnitude,sampling_period=h, units=pq.Hz,t_start=0*pq.ms)\n rate_signal_coinc = neo.AnalogSignal(coincrate.rescale('Hz').magnitude,sampling_period=h, units=pq.Hz,t_start=0*pq.ms)\n sts_bg = poisson_nonstat(rate_signal_bg,N=N)\n # inserting coincidences\n sts_coinc = poisson_nonstat(rate_signal_coinc,N=1)\n sts_bg_coinc = []\n for j in sts_bg:\n sts_bg_coinc.append(\n 
neo.SpikeTrain(numpy.sort(numpy.append(j.magnitude, sts_coinc[0].magnitude))*j.units,\n t_start=j.t_start,t_stop = j.t_stop))\n spiketrain.append(sts_bg_coinc)\n return {'st':spiketrain, 'backgroundrate':backgroundrate, 'coincrate':coincrate}", "def data_generator_simulation1():\n # Target : 1 nuage de point\n nt = 1000\n mu_t = np.array([50, 50])\n cov_t = np.array([[60, 40], \n [40, 60]])\n xt = ot.datasets.make_2D_samples_gauss(nt, mu_t, cov_t)\n\n # Source : 3 nuages de points\n ns1 = 700\n mu_s = np.array([25, 60])\n cov_s = np.array([[30, 10], \n [10, 30]])\n xs = ot.datasets.make_2D_samples_gauss(ns1, mu_s, cov_s)\n\n ns2 = 400\n mu_s = np.array([55, 80])\n cov_s = np.array([[30, 10], \n [10, 30]])\n xs=np.append(xs,ot.datasets.make_2D_samples_gauss(ns2, mu_s, cov_s),axis=0)\n\n\n # Compute the distribution laws associate with the clouds of dots.\n ns=ns1+ns2\n a, b = ot.unif(ns), ot.unif(nt) # uniform distribution on samples\n return (xs,a),(xt,b)", "def assign_SNP(self):\n\n\t\tlog.info('{}{}{}{}'.format(clr.green,'hplc__ ',clr.end,'Assigning data on SNP-basis..'))\n\n\t\tfor index, row in self.mutation_dataframe.iterrows():\n\t\t\tcurrent_SNP = row['SNPID']\n\t\t\tcurrent_call = row['Call']\n\t\t\tcurrent_haplotype = row['CTG repeat allele1 allele2']\n\t\t\tallele_status = row['Unnamed: 11']\n\n\t\t\t## some entries have slashes instead of spaces, for some fucking reason\n\t\t\tif '/' in current_haplotype:\n\t\t\t\tcurrent_haplotype = current_haplotype.replace('/', ' ')\n\n\t\t\t## split data so alleles can be assigned to the respective status values\n\t\t\t## create tuples for alleles, with genotype, status as data\n\t\t\tsplit_haplotype = current_haplotype.split(' ')\n\t\t\tsplit_status = allele_status.split('/')\n\t\t\tprimary_allele = (split_haplotype[0], split_status[0])\n\t\t\tsecondary_allele = (split_haplotype[1], split_status[1])\n\t\t\tcall_vector = [current_call, primary_allele, secondary_allele]\n\n\t\t\t## if the current SNP already has an object made in our instance dictionary of SNPs\n\t\t\t## append the current haplotype for this new observation\n\t\t\t## otherwise, no object has been made; make one and append observation data\n\t\t\tif current_SNP in self.instance_mutation_population:\n\t\t\t\tself.instance_mutation_population[current_SNP].append_haplotype(current_haplotype)\n\t\t\t\tself.instance_mutation_population[current_SNP].append_call(call_vector)\n\t\t\t\tself.instance_mutation_population[current_SNP].append_condition(primary_allele)\n\t\t\t\tself.instance_mutation_population[current_SNP].append_condition(secondary_allele)\n\t\t\telse:\n\t\t\t\tsnp_object = IndividualSNP()\n\t\t\t\tsnp_object.set_SNPID(current_SNP)\n\t\t\t\tsnp_object.append_haplotype(current_haplotype)\n\t\t\t\tsnp_object.append_call(call_vector)\n\t\t\t\tsnp_object.append_condition(primary_allele)\n\t\t\t\tsnp_object.append_condition(secondary_allele)\n\t\t\t\tself.instance_mutation_population[current_SNP] = snp_object", "def fit(self):\n # if self.verbose == 1:\n # print ('The list of all perturbation with its probability: \\n')\n # for perturb in range(len(self.p_list)):\n # print('%s perturbation with probability of: %s \\n' %(self.p_list[perturb], self.p_prob[perturb]))\n #p_current, error_vec_current ,error_vec_normal_current = self.minus_log_prob_neuron(self.neuron) # log probability of the current neuron\n p_current, error_vec_current ,error_vec_normal_current = self.kl_distance(self.neuron) # log probability of the current neuron\n acc = 0\n for i in range(self.ite):\n 
if(self.verbose ==1):\n #p_current, er , error_vec_normal_current = self.minus_log_prob_neuron(self.neuron)\n p_current, er , error_vec_normal_current = self.kl_distance(self.neuron)\n #print('feature of current is: \\n %s' %(self.neuron.features)+ '\\n')\n print('\\n and its probability is: %s' %p_current)\n per = self.select_proposal() # MCMC index\n p_sym, details = self.do_MCMC(per)\n #p_proposal, error_vec_proposal, error_vec_normal_proposal = self.minus_log_prob_neuron(self.neuron)\n p_proposal, error_vec_proposal, error_vec_normal_proposal = self.kl_distance(self.neuron)\n if(self.verbose ==1):\n #print('feature of proposal is: \\n %s' %(self.neuron.features))\n print('\\n and its probability is: %s' %p_proposal)\n a = min(1, p_sym * np.exp(p_current - p_proposal)) # Metropolis choice, notice that the values are minus log probability\n B = self.accept_proposal(a) # the boolean of acceptance\n if(B):\n p_current = p_proposal\n error_vec_current = error_vec_proposal\n error_vec_normal_current = error_vec_normal_proposal\n self.trend[:,i] = error_vec_proposal\n self.trend_normal[:,i] = error_vec_normal_proposal\n acc = acc + 1\n else:\n self.undo_MCMC(per, details)\n self.trend[:,i] = error_vec_current\n self.trend_normal[:,i] = error_vec_normal_current\n if len(self.neuron.nodes_list) == self.neuron.n_soma:\n self.neuron = self.initial_neuron(int(self.n_node/self.initial_seg),self.initial_seg)\n #p_current, error_vec_current, error_vec_normal_current = self.minus_log_prob_neuron(self.neuron)\n p_current, error_vec_current, error_vec_normal_current = self.kl_distance(self.neuron)\n if(self.verbose ==1):\n print ('\\n')\n print('Selected perturbation = ' + per)\n print('the p of acceptance was %s and it was %s that it`s been accepted.'%(a,B))\n print ('\\n')\n if(np.remainder(i,100)==0):\n self.evo.append(deepcopy(self.neuron))\n self.neuron.set_nodes_values()\n print acc", "def _sample_goal(self) -> np.ndarray:\n goal = np.array(get_link_pose(self.obj_ids['fixed'][1], self._pegs[0])[0])\n return goal.copy()", "def mutation(biny,mirna,mp, data):\n for i in range(0, len(biny)):\n #for each individual check if random number < cp\n if random.uniform(0,1)< mp:\n #if yes choose random gen for mutation\n zufall = random.randint(0,len(biny[i])-1)\n #if random <0.5 exchange of gene\n if random.uniform(0,1)< 0.5:\n biny[i] = np.delete(biny[i],zufall,axis = 0)\n mirna[i].pop(zufall)\n new = data.sample(n=1, axis =1)\n biny[i] = np.append(biny[i], np.array(new.values.T), axis = 0)\n mirna[i]+=list(new.columns)\n #else: not-transform given gene\n else:\n biny[i][zufall] = 1 - biny[i][zufall]\n if \"not\" not in mirna[i][zufall]:\n mirna[i][zufall] = \"not_\" + mirna[i][zufall]\n\n return biny, mirna", "def clone(self, data):", "def generate_recog_data(T=2000, d=50, R=1, P=0.5, interleave=True, multiRep=True, xDataVals='+-', softLabels=False): \n if np.isscalar(R):\n Rlist = [R]\n else:\n Rlist = R\n \n data = []\n repeatFlag = False\n r=0 #countdown to repeat\n for t in range(T): \n #decide if repeating\n R = Rlist[np.random.randint(0, len(Rlist))]\n if interleave:\n repeatFlag = np.random.rand()<P\n else:\n if r>0:\n repeatFlag = False\n r-=1\n else:\n repeatFlag = np.random.rand()<P \n if repeatFlag:\n r = R\n \n #generate datapoint\n if t>=R and repeatFlag and (multiRep or data[t-R][1].round()==0):\n x = data[t-R][0]\n y = 1\n else:\n if xDataVals == '+-': #TODO should really do this outside the loop...\n x = 2*np.round(np.random.rand(d))-1\n elif xDataVals.lower() == 'normal':\n x = 
np.sqrt(d)*np.random.randn(d) \n elif xDataVals.lower().startswith('uniform'):\n upper, lower = parse_xDataVals_string(xDataVals)\n x = np.random.rand(d)*(upper-lower)+lower\n elif xDataVals == '01':\n x = np.round(np.random.rand(d))\n else:\n raise ValueError('Invalid value for \"xDataVals\" arg') \n y = 0\n \n if softLabels:\n y*=(1-2*softLabels); y+=softLabels \n data.append((x,np.array([y]))) \n \n return data_to_tensor(data)", "def preprocess(data): \n print(\"Preprocessing data..\") \n data = data[np.where(data['z'] == data['z'][snapshot])]\n x = data[['sdss_u', 'sdss_g', 'sdss_r', 'sdss_i', 'sdss_z']]\n y = data[['m_star']]#, 'sfr']]\n \n x = np.array(x.tolist()) #convert structured array to array\n y = np.array(y.tolist()) #convert structured array to array\n\n perm = np.random.choice([True, False], len(data), p=[perc_train, 1-perc_train])\n\n return x, y, x[perm,:], y[perm], x[np.invert(perm),:], y[np.invert(perm)]", "def newproteindistributions(protein):\n modules = protein.split()\n\n # construct a list of the joint names\n joints = [' '.join(modules[i:i+3]) for i in range(len(modules)-2)]\n\n # numbers of lengths, angles and dihedrals for new protein\n num_params = param_split('CE', len(modules))\n\n # initialise an empty array for the data for each parameter\n param_dists = [[] for i in range(sum(num_params))]\n\n for joint_num, joint_name in enumerate(joints):\n params = [];\n for rp, j in joint_locations(joint_name):\n for sim in range(1, 101):\n # for each simulation, get the parameters for the representation\n p = get_parameters(rp, sim, 'CE')\n # only keep the the ones relevant to this joint\n params.append(p[np.array(joint_parameter_locations(j, 'CE'))])\n\n # turn params into a matrix with columns containing the distribution\n # of each parameter\n params= np.stack(params)\n\n # append the parameters in the correct place in the new protein\n param_locs = joint_parameter_locations(joint_num,\n 'CE',\n protein_length=len(modules))\n for i in range(len(param_locs)):\n param_dists[param_locs[i]].extend(params[:,i])\n\n return param_dists", "def generateMDP(v,a,G, p =0.9):\n debug = False;\n P= np.zeros((v,v,a)); d = np.zeros((v,a))\n for node in range(v):#x_now = node\n nodeInd = node+1;\n neighbours = list(G.neighbors(nodeInd));\n totalN = len(neighbours);\n # chance of not reaching action\n pNot = (1.-p)/(totalN);\n actionIter = 0;\n if debug: \n print (neighbours);\n for neighbour in neighbours: # neighbour = x_next\n neighbourInd = neighbour - 1;\n P[neighbourInd,node,actionIter] = p;\n # chance of ending somewhere else\n for scattered in neighbours:\n scatteredInd = scattered -1;\n if debug:\n print (scattered);\n if scattered != neighbour:\n # probablity of ending up at a neighbour\n P[scatteredInd,node,actionIter] = pNot;\n # some probability of staying stationary\n P[node,node,actionIter] =pNot;\n actionIter += 1; \n while actionIter < a: # chances of staying still \n P[node, node, actionIter] = 1.0;\n# P[node, node, actionIter] = p;\n# pNot = (1.-p)/(totalN);\n# for scattered in neighbours: \n# scatteredInd = scattered -1;\n# P[scatteredInd,node,actionIter] = pNot;\n actionIter += 1;\n # test the cost function\n c = 1000.*np.ones((v,a))\n c[6] = 0.;\n\n return P,c", "def random(self,p,dis):\n theta0 = MS.theta(p,dis)\n rho = sqrt(3.)/2.\n z1,z2 = random.gauss(0.,1.),random.gauss(0.,1.)\n y = z1*dis*theta0/sqrt(12.)+z2*dis*theta0/2.\n theta = z2*theta0\n debug('msnoise.random p,dis,x,theta ',(p,dis,y,theta))\n return y,theta", "def generate_flight_data_fixture(self, 
data):\n return copy(data)", "def mate(self) -> None:\n\n self.next_gen = {}\n\n # First, pass the keepers into the next generation\n survive_keys = list(self.survivors.keys())[:self.keepers]\n old_num = {k: v for k, v in self.population.items() if k in survive_keys}\n\n # Renumber keys\n for i, values in enumerate(old_num.items()):\n self.next_gen[i] = values[1]\n\n # We will be keeping the index values for renumbering\n current_key = self.keepers\n\n # Next we select one of the top % to mate with the general population\n mating_keys = list(self.survivors.keys())[:int(self.pop_size * self.mutation)]\n gen_keys = set(self.survivors.keys())\n\n # Choose our lucky couple, partition and mate\n boy_num = choice(mating_keys)\n boy = self.population[boy_num]\n\n # Numpy can't choose from a set, so making a list from set first\n girl_num = choice(list(set(mating_keys) - {boy_num}))\n girl = self.population[girl_num]\n\n # This is the splice partition\n splice = int(len(boy))\n self.next_gen[current_key] = child = {k: v for k, v in boy.items() if k < splice}\n child.update({k: v for k, v in girl.items() if k >= splice})\n print(child)\n child = self.mutate(child) # self.next_gen[current_key]\n current_key += 1\n\n # Now we splice from the other direction\n self.next_gen[current_key] = child = {k: v for k, v in boy.items() if k >= splice}\n child.update({k: v for k, v in girl.items() if k < splice})\n self.next_gen[current_key] = self.mutate(child)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
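A minimal sketch of the replica-generation step described above: draw a proposal theta from the prior, then simulate data given theta. The Gaussian simulator, the sample sizes, and the function signature are illustrative assumptions, not taken from this dataset row.

```python
import numpy as np
from scipy import stats

def generate_replica(prior, n_samples, n_dim, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    theta = prior.rvs()  # proposal theta ~ p(theta)
    # Simulate replica data given theta (here: a Gaussian centred on theta).
    replica = rng.normal(loc=theta, scale=1.0, size=(n_samples, n_dim))
    return theta, replica

prior = stats.multivariate_normal(mean=np.zeros(3), cov=np.eye(3))
theta, replica = generate_replica(prior, n_samples=500, n_dim=3)
print(theta.shape, replica.shape)  # (3,) (500, 3)
```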
This method calculates statistics and measures the distance between the replica data and the original data. Input: the original data and the replica data; output: the distance. The first attempt uses the Wasserstein distance.
def distance(self,data,replica):
[ "def distance(self,data,replica):\n weight = np.random.multivariate_normal(mean=np.random.normal(size=self.n_dim),cov = np.eye(self.n_dim),size=self.data_dim)\n weight = weight /np.sqrt(np.sum(weight**2,axis=0,keepdims=True))\n data = np.matmul(data,weight)\n replica = np.matmul(replica,weight)\n \n result = [stats.wasserstein_distance(data[:,i],replica[:,i]) for i in range(len(weight))]\n\n return np.mean(result)\n # return np.abs(np.mean(data) - np.mean(replica)) + np.abs(np.std(data) - np.std(replica))", "def _update_resampler_values(self, cycle_idx, resampling_data, resampler_data):\n\n num_clones = 0\n num_merges = 0\n num_walkers = len(resampling_data)\n for walker_record in resampling_data:\n if walker_record['decision_id'][0]==self.decision.ENUM.CLONE.value:\n num_clones += 1\n elif walker_record['decision_id'][0]==self.decision.ENUM.KEEP_MERGE.value:\n num_merges += 1\n\n self.percentage_cloned_walkers = (num_clones/num_walkers) * 100\n self.percentage_merged_walkers = (num_merges/num_walkers) * 100\n\n #Get the statistics\n for resampler_record in resampler_data:\n self.variation_value = resampler_record['variation'][0]\n distance_matrix = resampler_record['distance_matrix']\n\n\n #get the upper triangle values of the distance_matrix\n distance_matrix = np.triu(distance_matrix)\n distance_values= distance_matrix[np.where(distance_matrix>0)]\n self.avg_distance = np.average(distance_values)\n self.min_distance = np.min(distance_values)\n self.max_distance = np.max(distance_values)\n\n self.cycle_idx = cycle_idx", "def NN_distance_final(tdata):\n\tRAs = tdata['RA_2']\n\tDECs = tdata['DEC_2']\n\n\tx = np.cos(np.radians(RAs)) * np.cos(np.radians(DECs))\n\ty = np.sin(np.radians(RAs)) * np.cos(np.radians(DECs))\n\tz = np.sin(np.radians(DECs))\n\tcoordinates = np.vstack((x,y,z)).T\n\n\tcoordinates_tree = cKDTree(coordinates,leafsize=16)\n\tTheResult_distance = []\n\tfor i,item in enumerate(coordinates):\n\t\t'''\n\t\tFind 2nd closest neighbours, since the 1st is the point itself.\n\n\t\tcoordinates_tree.query(item,k=2)[1][1] is the index of this second closest \n\t\tneighbour.\n\n\t\tWe then compute the spherical distance between the item and the \n\t\tclosest neighbour.\n\t\t'''\n\t\t# print coordinates_tree.query(item,k=2,p=2)\n\t\tindex=coordinates_tree.query(item,k=2,p=2,n_jobs=-1)[1][1]\n\t\tnearestN = [RAs[index],DECs[index]]\n\t\tsource = [RAs[i],DECs[i]]\n\t\t# distance in arcmin\n\t\tdistance = distanceOnSphere(nearestN[0],nearestN[1],#RA,DEC coordinates of the nearest\n\t\t\t\t\t\t\t\tsource[0],source[1])*60 #RA,DEC coordinates of the current item\n\t\t# print distance/60\n\t\tTheResult_distance.append(distance)\t\n\n\treturn TheResult_distance", "def __clean_and_calculate_distance(self):\r\n \r\n \r\n #Getting the returned list from the get method\r\n \r\n graphs_info_list=self.__get()\r\n \r\n print(\"Completed getting the road graphs\")\r\n \r\n print(\"Processing the graphs...\")\r\n \r\n #removing empty lists as a result of polygons with no intersecting roads\r\n \r\n graphs_info_list=[cleaned_list for cleaned_list in graphs_info_list if len(cleaned_list) > 1 ]\r\n \r\n \r\n #instantiating an empty dictionary to store the data\r\n \r\n result_dict={'NAME':[],\"highway_type\":[],'Distance(KM)':[],'Distance(Miles)':[]}\r\n \r\n #<<<<<<<<<<<<<<<<<<<<Data cleaning and manipulation block>>>>>>>>>>>>>>>>>\r\n \r\n for graphs in graphs_info_list:\r\n \r\n graph=graphs[-1]\r\n \r\n print(\"Converting graphs to GeoDataFrame...\")\r\n \r\n 
graph_nodes,graph_dataframe=ox.graph_to_gdfs(graph)\r\n \r\n print(\"Completed converting graphs to GeoDataFrame ...\")\r\n \r\n #>>>>Calculating distance block<<<<\r\n \r\n #Loop through the geometry column to create a list of coordinate tuples from the geometry\r\n \r\n print('Computing distances in kilometers and miles...')\r\n \r\n for layer,highwaytype in zip(graph_dataframe['geometry'],graph_dataframe[\"highway\"]):\r\n \r\n geometry=list(layer.coords)\r\n \r\n #transforming the coordinate pairs to support geopy distance function\r\n \r\n start_long,start_lat=geometry[0]\r\n \r\n stop_long,stop_lat=geometry[1]\r\n \r\n start=(start_lat,start_long)\r\n \r\n stop=(stop_lat,stop_long)\r\n \r\n d=distance.distance\r\n \r\n distance_km=d(start,stop).km\r\n \r\n distance_miles=d(start,stop).miles\r\n \r\n result_dict['NAME'].append(graphs[1])\r\n \r\n result_dict[\"highway_type\"].append(highwaytype)\r\n \r\n result_dict['Distance(KM)'].append(distance_km)\r\n \r\n result_dict['Distance(Miles)'].append(distance_miles)\r\n \r\n \r\n \r\n print('Completed computing distances...')\r\n \r\n \r\n \r\n print(\"Aggregating results in a dataframe...\")\r\n \r\n result_dataframe=pd.DataFrame(dict([ (column,pd.Series(row)) for column,row in result_dict.items() ]))\r\n \r\n print(\"Completed aggregating results...\")\r\n \r\n #>>>>>>>>>>>grouping DataFrame by highway_type<<<<<<<<<<<<<\r\n \r\n #First we fill missing value because not all roads are classified\r\n \r\n print(\"Filling missing values...\")\r\n \r\n result_dataframe=result_dataframe.fillna(\"No highway category\")\r\n \r\n print(\"Missing values filled...\")\r\n \r\n #summing up each road distances\r\n \r\n print(\"Grouping DataFrame...\")\r\n \r\n #converting keys to tuples to avoid unhashable errors because I figures some highways categories are lists types\r\n \r\n result_dataframe['highway_type']=result_dataframe['highway_type'].apply(lambda x: tuple(x) if type(x)==list else x)\r\n \r\n grouped_dataframe=result_dataframe.groupby(['NAME','highway_type'],as_index=False).sum()\r\n \r\n print(\"Completed grouping DataFrame...\")\r\n \r\n return grouped_dataframe", "def test_compute_distances(self):\n self.cluster_obj_2.centroids = self.X[:2, :]\n distance_matrix = self.cluster_obj_2.compute_distances()\n self.assertEqual(round(distance_matrix[0,0], 2), 0.0)\n self.assertEqual(round(distance_matrix[0,1], 2), 0.42)", "def distance(self, data: np.ndarray) -> np.ndarray:\n data = self.normalize(data)\n dist = self.prototypes - data\n dist = np.sqrt(sum(dist.T**2))\n return dist", "def calculate_distances(self):\n\n # Initialize container.\n distances = np.zeros((len(self.data.stem_ids), 2))\n\n # For each report-abstract pairs\n for i in tqdm(range(len(self.data.stem_ids))):\n\n # Get report, abstract and random other abstract\n report = self.model.doc_vecs.loc['%s_report' % self.data.stem_ids[i]]\n summary = self.model.doc_vecs.loc['%s_abstract' % self.data.stem_ids[i]]\n other = self.model.doc_vecs.loc[self.data.abstract_ids[random.randint(0, len(self.data.abstract_ids)-1)]]\n\n # self.distance_measure is always cosine. 
Calculate distance.\n if self.distance_measure == 'cosine':\n distances[i][0] = cosine(report, summary)\n distances[i][1] = cosine(report, other)\n\n # Make pandas dataframe, save and return.\n distances = pd.DataFrame(distances, index=self.data.stem_ids, columns=['own', 'other'])\n distances.to_csv(self.model.path / str('distances_%s_%s.csv' % (self.data.name, self.distance_measure)))\n\n return distances", "def distance_by_measurements(singles_1, singles_2, correlators_1, correlators_2):\n\n return ((singles_1 - singles_2) ** 2).mean() + ((correlators_1 - correlators_2) ** 2).mean()\n # return ((singles_1 - singles_2) ** 2).mean()", "def psd_pairwise_comparison():\n # Location of the data \n base_dir = '../example/' \n \n # Data resolution, in nanometers \n resolution = {'res_xy_nm': 100, 'res_z_nm': 70}\n \n # Threshold value for the probability maps. This value does not usually need to be changed. \n thresh = 0.9\n\n # List the file names \n target_filenames = ['PSD95m_1st.tif', 'PSD95r_2nd.tif']\n reference_filenames = ['synapsin_1st.tif', 'synapsin_1st.tif']\n\n # Create a query for each pair\n query_list = []\n for n in range(0, len(target_filenames)):\n target_name = target_filenames[n] # The AB we're interested in testing (PSD)\n reference_name = reference_filenames[n] # The previously validated AB (synapsin)\n \n # Formulate the query\n query = {'preIF': [reference_name], 'preIF_z': [2], \n 'postIF': [target_name], 'postIF_z': [2],\n 'punctumSize': 2}\n query_list.append(query)\n\n # Run the SACT \n measure_list = aa.calculate_measure_lists(query_list, None, base_dir,\n thresh, resolution, target_filenames)\n \n # Convert measure object to a dataframe \n project_names = ['PSD_M', 'PSD_R']\n df = aa.create_df(measure_list, project_names, target_filenames, reference_filenames)\n print(df)\n\n return df", "def _compute_distances(self, spacy_en_dir=\"en\"):\n nlp = spacy.load(spacy_en_dir)\n df = self._base.get_all_text()\n print(\"tokenizing\")\n tqdm.pandas()\n df[\"noun_tokens\"] = df.sentence.progress_apply(lambda text: ReviewApp._graph_tokenize(text, nlp))\n print(\"building distances\")\n distances = ReviewApp._word_neighbors(df, 1).assign(weight=2).append(\n ReviewApp._word_neighbors(df, 1).assign(weight=1))\n distances = distances.groupby(['w0', 'w1']).weight.sum().reset_index()\n return distances", "def compute_differences(self):\n s1 = self.suffix_1\n s2 = self.suffix_2\n d = self.genescores\n d['pvalue_diff'] = _np.abs(d['pvalue' + s1] - d['pvalue' + s2])\n d['pvalue_log_diff'] = _np.abs(\n _np.log10(d['pvalue' + s1]) - _np.log10(d['pvalue' + s2])\n )\n d['snps_diff'] = _np.abs(d['numSnps' + s1] - d['numSnps' + s2])\n self.genescores = d\n d = self.fgenescores\n d['pvalue_diff'] = _np.abs(d['pvalue' + s1] - d['pvalue' + s2])\n d['pvalue_log_diff'] = _np.abs(\n _np.log10(d['pvalue' + s1]) - _np.log10(d['pvalue' + s2])\n )\n d['snps_diff'] = _np.abs(d['numSnps' + s1] - d['numSnps' + s2])\n self.fgenescores = d\n d = self.pathway\n d['chi2Pvalue_diff'] = _np.abs(\n d['chi2Pvalue' + s1] - d['chi2Pvalue' + s2]\n )\n d['chi2Pvalue_log_diff'] = _np.abs(\n _np.log10(d['chi2Pvalue' + s1]) - _np.log10(d['chi2Pvalue' + s2])\n )\n d['empPvalue_diff'] = _np.abs(\n d['empPvalue' + s1] - d['empPvalue' + s2]\n )\n d['empPvalue_log_diff'] = _np.abs(\n _np.log10(d['empPvalue' + s1]) - _np.log10(d['empPvalue' + s2])\n )\n self.pathway = d", "def get_phon_dist(phon1, phon2, phon_data):\n if phon1 and phon2:\n nr_disagr = 0\n phon1 = phon1.strip(\":\")\n phon2 = phon2.strip(\":\")\n 
nr_relevant_feats = len([feat for feat in zip(phon_data[phon1],phon_data[phon2]) if feat != (0,0)])\n for i, feat_val in enumerate(phon_data[phon1]):\n if feat_val != phon_data[phon2][i]:\n nr_disagr += 1\n dist = round(nr_disagr / nr_relevant_feats, 2) \n else:\n dist = 1\n return dist", "def calculate_distances(self):\n\n # Matrices with reports vectors and abstracts vectors\n reports = self.model.doc_vecs.loc[self.data.report_ids]\n abstracts = self.model.doc_vecs.loc[self.data.abstract_ids]\n\n # Calculates the distance between each pairs of the matrices\n distances = cdist(reports, abstracts, self.distance_measure)\n distances = np.nan_to_num(distances, nan=np.inf)\n\n distances = pd.DataFrame(distances, index=self.data.report_ids, columns=self.data.abstract_ids)\n\n return distances", "def cal_pairwise_distances(self):\n all_combs = combinations(self.all_user_id, 2)\n all_pairs = [p for p in all_combs]\n self.all_distance = DataFrame(index=range(len(all_pairs)), \\\n columns = [\"pair\", \"uid_a\", \"uid_b\", \"dist_a2b\", \"dist_b2a\"])\n \n if self.scorer_load_counter != self.dist_cal_counter:\n print \"Updating pairwise distances....\"\n for i, pair in enumerate(all_pairs):\n a, b = pair\n \n a_profile = self.users.ix[self.users.ID == a, 2:].as_matrix()\n a_taste = list(self.users.ix[self.users.ID == a, 1])[0]\n b_profile = self.users.ix[self.users.ID == b, 2:].as_matrix()\n b_taste = list(self.users.ix[self.users.ID == b, 1])[0]\n \n dist_a2b = self.sim_scorer.cal_sim(a_profile, b_profile, a_taste)\n dist_b2a = self.sim_scorer.cal_sim(a_profile, b_profile, b_taste)\n _row = [(a, b), a, b, dist_a2b, dist_b2a]\n\n self.all_distance.iloc[i] = _row\n \n self.dist_cal_counter += 1 \n print \"Updating is completed!\"", "def _get_distance(self):\n\n # implement here", "def calculate_distance(measurement, f_query, t_data, p_value=None):\n # Specify features to use in measurement calculations, drop severity as it\n # is not part of the distance calculation and will be used later for\n # feature prediction\n arr_a = f_query[:11]\n arr_b = t_data[:, :11]\n\n if measurement == 'euclidean':\n # Get the sum of all squared subtractions\n values = np.sum((arr_a - arr_b) ** 2, axis=1)\n # Get the square root\n distances = np.sqrt(values)\n # Get the results indices by argsort\n distances_indices = np.argsort(distances)\n\n elif measurement == 'manhattan':\n # Get the sum of all squared subtractions\n distances = np.sum(np.abs(arr_a - arr_b), axis=1)\n # Get the results indices by argsort\n distances_indices = np.argsort(distances)\n\n elif measurement == 'minkowski':\n # Get the sum of all squared to the power of p_value subtractions.\n # Absolute is used here to prevent NumPy runtime warning for invalid\n # value encountered in power for distances calculation\n value = np.sum((abs(arr_a - arr_b) ** p_value), axis=1)\n # Calculate distances by multiplying values from previous equation\n # by 1 over the p_value\n distances = value ** (1 / p_value)\n # Get the results indices by argsort\n distances_indices = np.argsort(distances)\n\n else:\n raise Exception(\"An unknown distance calculation type has been \"\n \"specified, exiting application.\")\n\n if distances.size == 0 or distances_indices.size == 0:\n raise Exception(\"There has been a problem calculating the distances \"\n \"or sorting the distances via argsort, exiting \"\n \"application.\")\n\n return distances, distances_indices", "def test_example_10():\n import pandas as pd\n from tcrdist.repertoire import TCRrep\n\n df = 
pd.read_csv(\"dash.csv\")\n df2 = pd.read_csv(\"dash2.csv\")\n df = df.head(10) #(1)\n tr = TCRrep(cell_df = df, #(2)\n df2 = df2, \n organism = 'mouse', \n chains = ['alpha','beta'], \n db_file = 'alphabeta_gammadelta_db.tsv')\n \n assert tr.pw_alpha.shape == (10,10) \n assert tr.pw_beta.shape == (10,10)\n\n tr.compute_rect_distances() # (3) \n assert tr.rw_alpha.shape == (10,1924) \n assert tr.rw_beta.shape == (10,1924)\n\n df3 = df2.head(100)\n\n tr.compute_rect_distances(df = tr.clone_df, df2 = df3) # (4) \n assert tr.rw_alpha.shape == (10,100) \n assert tr.rw_beta.shape == (10,100)\n\n tr.compute_rect_distances( df = tr.clone_df.iloc[0:2,], # (5)\n df2 = df3) \n assert tr.rw_alpha.shape == (2,100) \n assert tr.rw_beta.shape == (2,100)", "def testDistance(self):\n\n # testList holds a couple 3-tuple (variable1, variable2, result)\n basicTestList = [(chr(170) * 48, chr(85) * 48, long((chr(255) * 48).encode('hex'), 16))]\n\n for test in basicTestList:\n result = Distance(test[0])(test[1])\n self.failIf(result != test[2], 'Result of _distance() should be %s but %s returned' %\n (test[2], result))", "def DTW(self):\n\n self.N, d1 = self.referenceTS.shape\n self.M, d2 = self.queryTS.shape\n\n if d1!= d2:\n print(\"Number of features not coherent between reference ({0}) and query ({1})\".format(d1,d2))\n return\n\n self.d = d1 # d = dimensionality/number of features\n\n self.distanceMatrix = pairwise_distances(X = self.referenceTS, Y = self.queryTS, metric = self.dist_measure, n_jobs= self.n_jobs)\n\n self.AccumulatedDistanceComputation(step_pattern = \"symmetric2\")", "def measureIt( self, matches, old_data, new_data ):\r\n \r\n distances = [ [] for _ in range( len( new_data ) ) ]\r\n for new_idx, candidates in enumerate( matches ):\r\n for old_idx in candidates:\r\n distances[ new_idx ].append( self._metric( new_data[ new_idx ], old_data[ old_idx ] ) )\r\n \r\n return distances" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Wasserstein distance is applied only to one-dimensional data (each random projection), but it is very effective at capturing a distribution. (A standalone sketch of this sliced-Wasserstein idea follows this row.)
def distance(self,data,replica):
    weight = np.random.multivariate_normal(mean=np.random.normal(size=self.n_dim),cov = np.eye(self.n_dim),size=self.data_dim)
    weight = weight /np.sqrt(np.sum(weight**2,axis=0,keepdims=True))
    data = np.matmul(data,weight)
    replica = np.matmul(replica,weight)

    result = [stats.wasserstein_distance(data[:,i],replica[:,i]) for i in range(len(weight))]

    return np.mean(result)
    # return np.abs(np.mean(data) - np.mean(replica)) + np.abs(np.std(data) - np.std(replica))
[ "def _apply_distance_filter(image: Image, window: Image, members: np.ndarray, weights: np.ndarray) -> np.ndarray:\n distances = np.zeros(image.size)\n for channel in range(3):\n img_channel = image[:, :][:, :, channel]\n win_channel = np.extract(members, window[:, :][:, :, channel])\n extras = (win_channel, weights)\n distances += ndimage.generic_filter(input=img_channel,\n output=np.float64,\n function=weighted_squared_distance,\n footprint=members,\n mode='mirror',\n extra_arguments=extras)\n return distances", "def distance_map(self):\n um = zeros((self._xdim, self._ydim))\n it = nditer(um, flags=['multi_index'])\n while not it.finished:\n for ii in range(it.multi_index[0] - 1, it.multi_index[0] + 2):\n for jj in range(it.multi_index[1] - 1, it.multi_index[1] + 2):\n if (ii >= 0 and ii < self._xdim and\n jj >= 0 and jj < self._ydim):\n w_1 = self._weights[ii * self._ydim + jj, :]\n w_2 = self._weights[it.multi_index[0] * self._ydim + it.multi_index[1]]\n um[it.multi_index] += torch.sqrt(((w_1 - w_2) ** 2).sum())\n it.iternext()\n um = um / um.max()\n return um", "def calculate_1d_distributions_distances(real_values, fake_values):\n assert (real_values.shape == fake_values.shape)\n\n distances = []\n for detector_idx in range(len(real_values)):\n detector_real_values = real_values[detector_idx]\n detector_fake_values = fake_values[detector_idx]\n distances.append(wasserstein_distance(detector_real_values, detector_fake_values))\n return distances", "def wasserstein_distance(mu1, log_variance1, mu2, log_variance2):\n distance = torch.sum((mu1 - mu2) ** 2, dim=1)\n variance1 = torch.exp(log_variance1)\n variance2 = torch.exp(log_variance2)\n distance += torch.sum(variance1, dim=1) + torch.sum(variance2, dim=1)\n distance -= 2 * torch.sum(torch.sqrt(variance1 * variance2), dim=1)\n return distance", "def wp(data, wt, percentiles):\n assert np.greater_equal(percentiles, 0.0).all(), \"Percentiles less than zero\"\n assert np.less_equal(percentiles, 1.0).all(), \"Percentiles greater than one\"\n data = np.asarray(data)\n assert len(data.shape) == 1\n if wt is None:\n wt = np.ones(data.shape, np.float)\n else:\n wt = np.asarray(wt, np.float)\n assert wt.shape == data.shape\n assert np.greater_equal(wt, 0.0).all(), \"Not all weights are non-negative.\"\n\n assert len(wt.shape) == 1\n n = data.shape[0]\n assert n > 0\n i = np.argsort(data)\n sd = np.take(data, i, axis=0)\n sw = np.take(wt, i, axis=0)\n aw = np.add.accumulate(sw)\n if not aw[-1] > 0:\n print(\"Nonpositive weight sum\")\n w = (aw-0.5*sw)/aw[-1]\n spots = np.searchsorted(w, percentiles)\n o = []\n for (s, p) in zip(spots, percentiles):\n if s == 0:\n o.append(sd[0])\n elif s == n:\n o.append(sd[n-1])\n else:\n f1 = (w[s] - p)/(w[s] - w[s-1])\n f2 = (p - w[s-1])/(w[s] - w[s-1])\n assert f1 >= 0 and f2 >= 0 and f1 <= 1 and f2 <= 1\n assert abs(f1+f2-1.0) < 1e-6\n o.append(sd[s-1]*f1 + sd[s]*f2)\n return o", "def get_distance(self, index):\n return (np.linalg.norm(self.image.astype('float') - self.population[index].image.astype('float'))) / (\n self.image.shape[0] * self.image.shape[1])", "def distances(self):\r\n if not self._distances:\r\n len_ = len(self.X_train)\r\n sample = np.random.randint(len_, size=int(0.1 * len_))\r\n for i in range(0, len(sample), 2):\r\n self._distances.append(\r\n minkowski(self.X_train.loc[i], self.X_train.loc[i + 1])\r\n )\r\n\r\n return self._distances", "def _sliding_window_dist(_mat, _wd, _dist_metric='median'):\n dists = np.zeros(len(_mat))\n for _i in range(len(_mat)):\n if _i - int(_wd/2) < 0 or 
_i + int(_wd/2) >= len(_mat):\n dists[_i] = 0\n else:\n # get slices\n _left_slice = slice(max(0, _i-_wd), _i)\n _right_slice = slice(_i, min(_i+_wd, len(_mat)))\n # slice matrix\n _intra1 = np.triu(_mat[_left_slice,_left_slice], 1)\n _intra1 = _intra1[np.isnan(_intra1)==False]\n _intra2 = np.triu(_mat[_right_slice,_right_slice], 1)\n _intra2 = _intra2[np.isnan(_intra2)==False]\n _intra_dist = np.concatenate([_intra1[_intra1 > 0],\n _intra2[_intra2 > 0]])\n _inter_dist = _mat[_left_slice,_right_slice]\n _inter_dist = _inter_dist[np.isnan(_inter_dist) == False]\n if len(_intra_dist) == 0 or len(_inter_dist) == 0:\n # return zero distance if one dist list is empty\n dists[_i] = 0\n continue\n # add dist info\n if _dist_metric == 'ks':\n if 'ks_2samp' not in locals():\n from scipy.stats import ks_2samp\n _f = np.sign((np.median(_inter_dist) - np.median(_intra_dist)) )\n dists[_i] = _f * ks_2samp(_intra_dist, _inter_dist)[0]\n elif _dist_metric == 'median':\n m_inter, m_intra = np.median(_inter_dist), np.median(_intra_dist)\n v_inter, v_intra = np.median((_inter_dist-m_inter)**2),\\\n np.median((_intra_dist-m_intra)**2)\n dists[_i] = (m_inter-m_intra) / np.sqrt(v_inter+v_intra)\n elif _dist_metric == 'mean':\n m_inter, m_intra = np.mean(_inter_dist), np.mean(_intra_dist)\n v_inter, v_intra = np.var(_inter_dist), np.var(_intra_dist)\n dists[_i] = (m_inter-m_intra) / np.sqrt(v_inter+v_intra)\n elif _dist_metric == 'normed_insulation':\n dists[_i] = (np.nanmean(_intra_dist) - np.nanmean(_inter_dist)) / (np.nanmean(_intra_dist) + np.nanmean(_inter_dist))\n elif _dist_metric == 'insulation':\n m_inter, m_intra = np.mean(_inter_dist), np.mean(_intra_dist)\n dists[_i] = m_inter / m_intra\n else:\n raise ValueError(f\"Wrong input _dist_metric\")\n\n #dists[dists<0] = 0\n\n return dists", "def DTW(self):\n\n self.N, d1 = self.referenceTS.shape\n self.M, d2 = self.queryTS.shape\n\n if d1!= d2:\n print(\"Number of features not coherent between reference ({0}) and query ({1})\".format(d1,d2))\n return\n\n self.d = d1 # d = dimensionality/number of features\n\n self.distanceMatrix = pairwise_distances(X = self.referenceTS, Y = self.queryTS, metric = self.dist_measure, n_jobs= self.n_jobs)\n\n self.AccumulatedDistanceComputation(step_pattern = \"symmetric2\")", "def dist_filter(xyz, dists, result):\n i = knn2\n il = 0\n end = xyz.shape[0] - knn2\n while i < end:\n dst = ((xyz[il,0] - xyz[i,0])**2 + \\\n (xyz[il,1] - xyz[i,1])**2 + \\\n (xyz[il,2] - xyz[i,2])**2)**0.5\n\n if dst >= abs(xyz[i,2])*depth_multiplier:\n il = i - knn2 + np.argmin(dists[i-knn2:i+knn2+1])\n result[il] = True\n i += knn2 - 1\n else:\n i += 1\n\n return result", "def distance_sample(sample):\n n = sample.shape[0]\n k = sample.shape[1] \n distances = np.zeros((n, k))\n \n row_count = 0\n for i in np.arange(k):\n for j in np.arange(i):\n distances[row_count, :] = sample[i, :] - sample[j, :]\n row_count += 1\n return distances", "def compute_distances_one_loop(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n # print('X.shape', X.shape)\n # print('X_train.shape', self.X_train.shape)\n for i in range(num_test):\n #######################################################################\n # TODO: #\n # Compute the l2 distance between the ith test point and all training #\n # points, and store the result in dists[i, :]. #\n # Do not use np.linalg.norm(). 
#\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Array of pixels [---R---G---B---]\n # Let total pixels (R + G + B pixels) = p\n # shape = (1, p)\n test_example = X[i]\n\n # X.train.shape = (num_train, p)\n # Broadcasts the test example with the training examples matrix\n diff_squares = np.square(test_example - self.X_train)\n # if i == 0:\n # print('diff_squares.shape', diff_squares.shape)\n # print('test_example[0]', test_example)\n # print('train_example[0]', self.X_train[0])\n # print('diff_squares[0]', diff_squares[0][0])\n\n # In each row, sum across the colums\n # axis=0, sum across rows (go down columns)\n # axis=1, sum across columns (go across row)\n sm = np.sum(diff_squares, axis=1, keepdims=True)\n # if i == 0:\n # print('sm.shape', sm.shape)\n assert sm.shape == (num_train, 1)\n\n temp = np.sqrt(sm)\n # if i == 0:\n # print('temp.shape', temp.shape)\n\n # Transpose column vector temp to row vector\n dists[i, :] = temp.T\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return dists", "def __compute_dist(self, medoids):\n dists = np.zeros((self.n, self.k))\n \n for i in range(self.n):\n dists[i,:] = np.linalg.norm(self.X[i,:] - medoids, axis=1)**2\n \n return dists", "def _dist_matrix(self, x, y):\n dm = dtw_distance(x, y, self.window, self.normalize)\n\n return dm", "def calc_dist(grain1,grain2):\n\tdist=np.sqrt((grain1.xyz[0][0]-grain2.xyz[0][0])**2+(grain1.xyz[0][1]-grain2.xyz[0][1])**2\n +(grain1.xyz[0][2]-grain2.xyz[0][2])**2)\n\treturn dist", "def _calculate_sd(self):\n cost = 0\n for k in range(self.k):\n cost += \\\n distance.cdist(np.array([self.centroids[k]]), np.array([self.previous_centroids[k]]),\n metric=self.metric)[\n 0][0]\n return cost", "def distance(d):\n arr01 = array([\n [1, d],\n [0, 1] \n ], float)\n print(\"The ray transfer matrix for your setup at d distance is\", )\n print(arr01)", "def light_distance(self):\n \treturn self.sight_dist()", "def Dist( X,Xi ):\n\n return np.sqrt(np.sum( (X - Xi)**2,axis = 1))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
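A standalone sketch of the sliced-Wasserstein idea used in the document above: project both samples onto random unit directions and average the one-dimensional Wasserstein distances over the projections. The function name and the number of projections are illustrative assumptions; `scipy.stats.wasserstein_distance` is the same routine used in the original code.

```python
import numpy as np
from scipy import stats

def sliced_wasserstein(data, replica, n_projections=50, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    n_features = data.shape[1]
    # Random directions, normalised column-wise to unit length.
    weight = rng.normal(size=(n_features, n_projections))
    weight /= np.sqrt(np.sum(weight ** 2, axis=0, keepdims=True))
    # Project both samples to 1-D and compare the marginals per projection.
    proj_data = data @ weight
    proj_replica = replica @ weight
    dists = [stats.wasserstein_distance(proj_data[:, i], proj_replica[:, i])
             for i in range(n_projections)]
    return float(np.mean(dists))

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, size=(500, 3))
y = rng.normal(0.5, 1.0, size=(500, 3))
print(sliced_wasserstein(x, y, rng=rng))  # grows with the shift between the samples
```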
Insert one record. If a record with the same ID already exists, overwrite it with the new data. (An alternative upsert sketch follows this row.)
def putData(self, data):
    try:
        self.getDataset().insert_one(data)
    except errors.DuplicateKeyError:
        updateData = {'$set': data}
        self.getDataset().update_one(
            {'_id': data['_id']}, updateData)
[ "def insert_one(self, data):\n self._collection.insert_one(data)", "def insert(self, data):\n if '_rev' in data:\n self.__not_opened()\n raise PreconditionsException(\n \"Can't add record with forbidden fields\")\n _rev = self.create_new_rev()\n if not '_id' in data:\n try:\n _id = self.id_ind.create_key()\n except:\n self.__not_opened()\n raise DatabaseException(\"No id?\")\n else:\n _id = data['_id']\n assert _id is not None\n data['_rev'] = _rev # for make_key_value compat with update / delete\n data['_id'] = _id\n self._insert_indexes(_rev, data)\n ret = {'_id': _id, '_rev': _rev}\n data.update(ret)\n return ret", "def insert_one(self, doc):\n self._assert_open()\n if self._primary_key in doc:\n _id = doc[self._primary_key]\n else:\n _id = doc[self._primary_key] = self._next_default_id()\n if _id in self:\n raise KeyError('Primary key collision!')\n self[_id] = doc\n return _id", "def upsert(self, data):\n\t\turl = '/samples/upsert'\n\t\treturn post(url, data)", "def _single_insert_index(self, index, data, doc_id):\n try:\n should_index = index.make_key_value(data)\n except Exception as ex:\n warnings.warn(\n \"\"\"Problem during insert for `%s`, ex = `%r`, \\\nyou should check index code.\"\"\" % (index.name, ex), RuntimeWarning)\n should_index = None\n if should_index:\n key, value = should_index\n index.insert_with_storage(doc_id, key, value)\n # if value:\n # storage = index.storage\n # start, size = storage.insert(value)\n # else:\n # start = 1\n # size = 0\n # index.insert(doc_id, key, start, size)", "def insert_row(self, data):\n print(\"Inserting row to database\")\n self.cursor.executemany(self.insert_query, data)\n self.connection.commit()", "def insert(self, data):\n self._collection.insert(data)", "def insert(self, entity):\n if getattr(entity, \"_id\") is not None:\n raise ValueError(\"_id is not null\")\n entity.pre_persist()\n self.db[self.collect].insert_one(entity.to_dic())", "def doctest_MOngoDataManager_insertWithExplicitId():", "def insert(self, ID):\n self.cur.execute('INSERT INTO comments (ID) VALUES (?)', [ID])\n self.sql.commit()\n\n log.debug(\"Inserted \" + str(ID) + \" into comment database!\")", "def _insert_id_index(self, _rev, data):\n _id, value = self.id_ind.make_key_value(data) # may be improved\n # storage = self.storage\n # start, size = storage.insert(value)\n # self.id_ind.insert(_id, _rev, start, size)\n self.id_ind.insert_with_storage(_id, _rev, value)\n return _id", "def _create(self):\n data = {\n 'device_id': self.id, \n 'name': \"No name\",\n \"registered\": False\n }\n self._tydb.upsert(data, Query().device_id == self.id)", "def _save(self, table_name, data, id=None):\n # Remove any items with a value of None\n cols = list(data.keys())\n vals = list(data.values())\n cols_clean = list()\n vals_clean = list()\n for i in range(len(cols)):\n val = vals[i]\n if val is not None:\n cols_clean.append(str(cols[i]))\n if isinstance(val, int):\n vals_clean.append(str(val))\n elif isinstance(val, str):\n vals_clean.append('\"{}\"'.format(val))\n else:\n raise TypeError('Expected str or int, got {}.'.format(type(val)))\n # Create the SQL and execute it\n cursor = self.db.cursor()\n if id:\n sql_set = list()\n for i in range(len(cols_clean)):\n sql_set.append('{}={}'.format(cols_clean[i], vals_clean[i]))\n sql = \"UPDATE {tn} SET {set} WHERE id={id}\". 
\\\n format(tn=table_name, set=','.join(sql_set), id=id)\n cursor.execute(sql, ())\n row_id = None\n else:\n cols_sql = ','.join(cols_clean)\n vals_sql = ','.join(vals_clean)\n sql = \"INSERT INTO {tn} ({cn}) VALUES ({vals})\". \\\n format(tn=table_name, cn=cols_sql, vals=vals_sql)\n row_id = cursor.execute(sql, ()).lastrowid\n\n self.db.commit()\n return row_id if row_id else 0", "def createAndAdd(data):", "def insert(self, data):\n # check if node exists in the tree already\n if self.search(data) is None:\n self.root = self._insert(self.root, data)\n self.size += 1", "def insert_document(self, collection, data, multiple=False):\n if multiple:\n return self.connection[collection].insert_many(data).inserted_id\n else:\n return self.connection[collection].insert_one(data).inserted_id", "def update_or_create(self, pk, data):\n if pk:\n obj = self.get(pk)\n if obj.get('id'):\n print(obj)\n return self.update(obj['id'], data)\n return self.create(data)", "def batch_insert_push(self, batch_data):\n data = self.data\n for key, value in batch_data.items():\n data[key] = value", "def insert_product(self, data):\n query = \"INSERT INTO Products VALUES (NULL, %s, %s, %s, %s, %s)\"\n self.mycursor.execute(query, data)\n self.connector.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
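The putData document above inserts and falls back to an update when the `_id` already exists. With pymongo the same insert-or-overwrite behaviour can be expressed in a single call; a minimal sketch, assuming a reachable MongoDB instance and illustrative database/collection names. One semantic difference worth noting: `replace_one` swaps the whole document, while the `$set` fallback above merges fields into the existing one.

```python
from datetime import datetime
from pymongo import MongoClient

collection = MongoClient("mongodb://localhost:27017")["labeling"]["dataset"]

def put_data(data):
    # upsert=True inserts when no document matches the filter, otherwise replaces it.
    collection.replace_one({"_id": data["_id"]}, data, upsert=True)

put_data({"_id": 1, "label": {}, "timestamp": datetime.now()})
```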
Get one record with the specified ID.
def getId(self, id):
    return self.getDataset().find_one({'_id': id})
[ "def read(self, id):", "def getById(self, id):\n for item in self.list: \n if item.getId() == id:\n return item", "def get(self, id): \n student = get(id)\n return student", "def get_by_id(self, id):\n cursor = db.cursor()\n cursor.execute(\n f\"SELECT * FROM {self.table} WHERE id = %(id)s\", {\"id\": id}\n )\n results = [self.model(*row) for row in cursor]\n if len(results) == 0:\n raise ValueError(f\"No instance corresponds to id = {id}\")\n cursor.close()\n return results[0]", "def get_object_by_id(self,id):\n return self.objects[id]", "def get(self, id): \n post = get(id)\n return post", "def FetchById( id ):\n\tresult = None\n\t\n\ttry:\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Trying to grab data from table using Id'))\n\t\tquery = \"SELECT * FROM shop WHERE id = %s;\"\n\t\tdb.cursor.execute( query, ( id, ) )\n\t\tresult = db.cursor.fetchone()\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Successfully grabbed data'))\n\t\t\n\texcept Error as e:\n\t\tLog.error(('SHOPS-Fetch-Id:', e))\n\t\tLog.info(('SHOPS-Fetch-Id:', query))\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Failed to grab data'))\n\treturn result", "def get(self, id):\n return self._opts.setdefault(id, [None])[0]", "def getById (id):\r\n if id in thingsById:\r\n return thingsById[id]\r\n else:\r\n return None", "def fetch( self, obj, id ):\n\t\treturn obj.ById( id )", "def retrieveByID (self, id):\n return self.getOne (\"where departmentID = %d\" % id)", "def find_by_id(self, id):\n\n raise NotImplementedError", "def get_by_id(self, id):\n objects = filter(lambda obj: getattr(obj, self.identifier) == id, self.objects)\n if not self.can_find_many:\n if objects:\n assert(len(objects)==1)# there should only be 1 object with this id\n return objects[0] \n\n if not objects and self.has_default_object:\n return self.get_default_object()\n\n return objects", "def getSingleImageDataById(self, imageId: str) -> ImageData:\n if self.isBlank(imageId):\n return None\n if imageId not in self.nameToImageData:\n logging.warning(f\"{self.getCurrentTime()}: Image data for requested id [{imageId}] not found\")\n return None\n return self.nameToImageData[imageId]", "def find_record_with_id(self, id, **kwargs):\r\n return self.get_scoped_query(**kwargs).filter_by(id=id).first_or_404()", "def get(self, id: int) -> Optional[Item]:\n return self.session.query(Item).get(id)", "def get_entry_from_id(self, id: int):\n for e in self.container:\n if e.id == id:\n return e", "def get(cls, id, using=None, index=None, **kwargs):\n es = cls._get_connection(using)\n doc = es.get(index=cls._default_index(index), id=id, **kwargs)\n if not doc.get(\"found\", False):\n return None\n return cls.from_es(doc)", "def get_dataset_by_id(activity_id, data):\n \n dataset = [i for i in data if i['id'] == activity_id][0]\n return dataset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the timestamp of the record with the specified ID. (A projection-based variant is sketched after this row.)
def getTimestamp(self, id):
    data = self.getId(id)
    if isinstance(data['timestamp'], datetime):
        return data['timestamp']
    else:
        return None
[ "def _get_time_from_id(self) -> datetime:\n return datetime.fromtimestamp(int(self.id.split(' ')[0]) / 1e3)", "def getTimeStamp(idx):\n return data.loc[idx, 'timestamp']", "def __get_update_timestamp(session: scoped_session, type: CachedDataType, id: str = None) -> datetime:\n logger = logging.getLogger(__name__)\n update_record_query = session.query(CacheTimestamp).\\\n filter(CacheTimestamp.data_type == type)\n\n if id != None:\n update_record_query = update_record_query.filter(CacheTimestamp.data_id == id)\n\n update_record = update_record_query.order_by(CacheTimestamp.update_time.desc()).\\\n limit(1).\\\n one_or_none()\n if update_record == None:\n return None\n\n return update_record.update_time", "def _ts(self, script):\n return get_one(self._db, Timestamp, service=script.script_name)", "def timestamp(self) -> int:\n return self.summary[\"timestamp\"]", "def timepoint_id(self):\n return self.identifier[1]", "def _get_timestamp(self):\n return datetime.datetime.now()", "def gettimeid(self):\n mymodel = UpdateTimeModel.create()\n self.timeid = mymodel.id\n return self.timeid", "def guess_timestamp(\n shot_id: Union[str, int]\n) -> int:\n # 1602074877621449728 -> 1602074877.621449728 sec -> 1602074877621449.728 u sec\n if not isinstance(shot_id, int):\n shot_id = int(shot_id)\n timestamp = int(float(shot_id) * 1e-9 * 1e6)\n return timestamp", "def getStartTimeById(self,id:str,format:bool=False) -> object:\n self.topWindow.logger.debug('get start time by id')\n ints=self.getIntervals(ignoreEmpty=False)\n startTime=Utils.parseTime(0)\n thisId=None\n for i in ints:\n thisId = i.get('id')\n if thisId==id:\n break\n duration = self.getDurationById(thisId)\n startTime = startTime + duration\n\n if format:\n return str(startTime)\n else:\n return startTime", "def dataForID(self, sensorID, start_ts=0):\n ts = []\n vals = []\n # in statement below, need to add 1 second to start_ts because the\n # 'rowsForOneID' method uses a >= test.\n for flds in self.db.rowsForOneID(sensorID, start_tm=start_ts+1):\n ts.append(flds['ts'])\n vals.append(flds['val'])\n return np.array(ts), np.array(vals)", "def getTimeStamp(self) -> \"SbUniqueId\":\n return _coin.SoNotList_getTimeStamp(self)", "def timestamp(self):\n def get_tstp(y, mo, d, h, mi, s):\n ts = time.strptime(str(y) + '-' + str(mo) + '-' + str(d) + 'T' + str(h) + ':' + \\\n str(mi) + ':' + str(s), '%Y-%m-%dT%H:%M:%S')\n return time.mktime(ts)\n y = 1970\n mo = 1\n d = 1\n h = 0\n mi = 0\n s = 0\n # syntacic hack - 'while' stmt is not important, but 'break' makes there goto stmt\n while 1:\n if self._content['year'] is None: break\n y = self._content['year']\n if self._content['month'] is None: break\n mo = self._content['month']\n if self._content['day'] is None: break\n d = self._content['day']\n if self._content['hour'] is None: break\n h = self._content['hour']\n if self._content['minute'] is None: break\n mi = self._content['minute']\n if self._content['second'] is None: break\n s = self._content['second']\n break\n if y < 1970: return 0.0\n return get_tstp(y, mo, d, h, mi, s)", "def get_id(self):\n return self.data['id']", "def setTimestamp(self, id):\n updateData = {'$set': {'timestamp': datetime.now()}}\n self.getDataset().update_one(\n {'_id': id}, updateData)", "def timestamp(self,item):\n try:\n self._timestamp[item]\n except:\n self._timestamp[item] = time.time()\n return self._timestamp[item]", "def tsuid(fid):\n\n tdm = TemporalDataMgr()\n return tdm.get_tsuid_from_func_id(func_id=fid)", "def FileTimestamp(klass, filename):\n return 
str(int(path(filename).st_mtime))", "def timestamp(self) -> Decimal:\n return self.__dict__[\"timestamp\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set timestamp of data to current timestamp with the specific ID.
def setTimestamp(self, id):
    updateData = {'$set': {'timestamp': datetime.now()}}
    self.getDataset().update_one(
        {'_id': id}, updateData)
[ "def __update_timestamp(self, track_id, timestamp):\n\n if track_id:\n if timestamp is None:\n timestamp = datetime.utcnow()\n current.db(self.table.track_id == track_id).update(track_timestmp=timestamp)", "def setTimeStamp(self, ts):\r\n \tself.timeStamp = ts", "def __save_update_timestamp(session: scoped_session, type: CachedDataType, id: str = \"\") -> None:\n ts = session.query(CacheTimestamp).filter(CacheTimestamp.data_type == type).filter(CacheTimestamp.data_id == id).one_or_none()\n if ts == None:\n session.add(CacheTimestamp(data_type = type, data_id = id))\n else:\n ts.update_time = datetime.utcnow();\n session.commit()", "def setSetStamp(self, long: int) -> None:\n ...", "def set_timestamp(self, timestamp):\n self.timestamp = LogEntry.normalize_timestamp(timestamp)", "def set(self, id, data):\n self.store.set_json(self.session_key(id), data)", "def update_id(self,id):\n self.id = id", "def mark_time(modelobj, user_id):\n if modelobj.created_date:\n modelobj.created_date = datetime.now()\n modelobj.created_by = user_id\n else:\n modelobj.updated_date = datetime.now()\n modelobj.updated_by = user_id", "def update(self, *args, **kwargs):\n utcnow = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S UTC\")\n self.updated_at = utcnow\n\n super(ModelBaseWithTimeStamp, self).update(*args, **kwargs)", "def data_id(self, data_id: str):\n\n self._data_id = data_id", "def set_to_timestamp_safe(self, value):\n try:\n if value is not None:\n self.to_timestamp = int(value)\n except:\n # do nothing, value will not have changed\n pass", "def _id(self, _id):\n self.__id = _id", "def update_identification(self):\n\n self.identification = system_to_ntp_time(time.time())", "def setSetStampMillis(self, long: int) -> None:\n ...", "def set(self, datetime, value):\n\t\tself._data[TimeDivision.timelabel(datetime)] = value", "def save(self, *args, **kwargs):\n self.updated_ts = datetime.utcnow()\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n utcnow = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S UTC\")\n self.created_at = utcnow\n self.updated_at = utcnow\n\n super(ModelBaseWithTimeStamp, self).save(*args, **kwargs)", "def updateLastSeen(self, id):\n timestamp = int(time.time())\n self.clients[id].lastSeen = timestamp\n self.logger.log(\"Updated lastSeen for client %d to %d\" % (id, timestamp))", "def setDataIDAttribute(self, dataID: 'char const *') -> \"void\":\n return _coin.ScXMLAssignElt_setDataIDAttribute(self, dataID)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set one label in certain index from data with specific ID.
def setData(self, data):
    updateData = {'$set': {'label.'+str(data['index']): data['tag']}}
    self.getDataset().update_one(
        {'_id': data['_id']}, updateData)
[ "def set_label_id(self, label_id):\n self.label_id = label_id", "def select_label(self, label_id: int) -> Label:", "def set_label(self, label):", "def _translate_label(self, data_id, set_label):\n if self._label_translation_table is None:\n self._label_translation_table = {\n self._ID_ALLSETS_X: self._translate_label_allsets_x,\n }\n\n assert data_id in self._label_translation_table, \\\n \"unknown data_id specified for label translation\"\n\n translation_function = self._label_translation_table[data_id]\n return translation_function(set_label)", "def change_text_label(self, label_id, txt):\n self.get_object(label_id).set_text(txt)", "def update_dict(label):\n if update:\n nonlocal index\n if label not in labels_dict:\n labels_dict[label] = index\n index += 1", "def update_labelid(self):\n local_sql = MysqlConnecttion(\"local\")\n # booth_sql = MysqlConnecttion(\"booth\")\n label_map = {}#{\"word\":labelid}\n query = r\"select Word, Labelid from labels\"\n rows = local_sql.excute_with_result(query)\n for row in rows:\n label_map[row[0]] = row[1]\n query = r'''(SELECT mqid, mqid2, labelid, mq_title_vector_short FROM magic_quadrants where removed = 0)'''\n mq_vector_map = {}#{\"mqid\":\"word vector (short)\"}\n label_tmap = {}\n rows = local_sql.excute_with_result(query)\n for row in rows:\n mq_vector_map[row[0]] = row[3]\n label_map[row[0]] = row[2] \n for mq_id in mq_vector_map:\n json_word_set = mq_vector_map[mq_id]\n if json_word_set == None or json_word_set == \"\":\n json_word_set = \"{}\"\n word_map = json.loads(json_word_set)\n label_list = []\n for word in word_map:\n if word in label_map: \n label_list.append(str(label_map[word]))\n labels = \";\".join(label_list)\n query = r\"UPDATE Magic_Quadranta set Labelid = '' WHERE MQID = '%s' and removed = 0\"%(mq_id)\n query = r\"UPDATE Magic_Quadrants set Labelid = '%s' WHERE MQID = '%s' and removed = 0\"%(labels, mq_id)\n local_sql.excute(query)\n # booth_sql.excute(query)", "def setData(self, id0, x, y):\n id = id0 + self.offset\n self._checkMaxId(id)\n self.indexList[id] = x\n self.dataList[id] = y\n self.replot = True", "def set_label(self, label):\n # check label makes sense\n if not isinstance(label, np.ndarray):\n raise TypeError(\"Label should be numpy array\")\n\n # only fill in attribute if the right size\n if label.shape[0] == self.points.shape[0]:\n self.label = label\n else:\n raise ValueError(\"Scan and Label don't contain same number of points\")\n\n self.do_label_projection()", "def set_label(self, label):\n # check label makes sense\n if not isinstance(label, np.ndarray):\n raise TypeError(\"Label should be numpy array\")\n\n # only fill in attribute if the right size\n if label.shape[0] == self.points.shape[0]:\n self.sem_label = (label // 1000).astype(np.uint8) # semantic label in lower half\n self.inst_label = (label % 1000).astype(np.uint8) # instance id in upper half\n cls, cnt = np.unique(self.sem_label, return_counts=True)\n unknown_clss = [9,12,18,22]\n for unknown_cls in unknown_clss:\n if unknown_cls in np.unique(self.sem_label):\n print(unknown_cls, cnt[cls==unknown_cls])\n else:\n print(\"Points shape: \", self.points.shape)\n print(\"Label shape: \", label.shape)\n raise ValueError(\"Scan and Label don't contain same number of points\")\n\n # sanity check\n assert((self.inst_label + (self.sem_label * 1000) == label).all())\n\n # self.augmentor()\n\n if self.project:\n self.do_label_projection()", "def add(self,label):\n\t\tif label not in self._label_to_index:\n\t\t\tself._label_to_index[label] = 
self.num_labels\n\t\t\tself._index_to_label[self.num_labels] = label\n\t\t\tself.num_labels += 1", "def set_label(self, x_label, y_label, index):\r\n # Store the latest setting of labels as the default labels\r\n self.x_label = x_label\r\n self.y_label = y_label\r\n try:\r\n self.sub_plots[index].axes.set_xlabel(x_label)\r\n self.sub_plots[index].axes.set_ylabel(y_label)\r\n except IndexError:\r\n raise IndexError, \"No sub-plot exists at index:{0!s}\".format(index)", "def set_labels(self,label):\n ax = self.figurecavas.figure.axes[0]\n ax.set_xlabel(label[0])\n ax.set_ylabel(label[1])", "def setlabel(self, key, label):\n self.wallet.set_label(key, label)", "def _index_label(self, label: Any) -> List[int]:\n raise NotImplementedError", "def update_labelid_vert(self):\n local_sql = MysqlConnecttion(\"local\")\n booth_sql = MysqlConnecttion(\"booth\")\n label_map = {}#{\"word\":labelid}\n query = r\"select Word, Labelid from labels WHERE Geogr = 0 and Vert = 1 and Mktseg = 0\"\n rows = local_sql.excute_with_result(query)\n for row in rows:\n label_map[row[0]] = row[1]\n query = r'''(SELECT mqid, docid, labelid, mq_title_vector_short FROM magic_quadrants where removed = 0)'''\n mq_vector_map = {}#{\"mqid\":\"word vector (short)\"}\n label_tmap = {}\n docid_map ={}\n rows = local_sql.excute_with_result(query)\n for row in rows:\n mq_vector_map[row[0]] = row[3]\n docid_map[row[0]] = row[1]\n label_map[row[0]] = row[2]\n query = r'''(SELECT title_short, docid FROM doc_deatail_vector)'''\n cool_map ={}\n rows = local_sql.excute_with_result(query)\n for row in rows:\n cool_map[row[1]] = row[0]\n for mq_id in mq_vector_map:\n json_word_set = mq_vector_map[mq_id]\n if json_word_set == None or json_word_set == \"\":\n json_word_set = \"{}\"\n word_map = json.loads(json_word_set)\n label_list = []\n for word in word_map:\n if word in label_map:\n label_list.append(str(label_map[word]))\n json_word_set = cool_map[docid_map[mq_id]]\n if json_word_set == None or json_word_set == \"\":\n json_word_set = \"{}\"\n word_map = json.loads(json_word_set)\n for word in word_map:\n if word in label_map:\n label_list.append(str(label_map[word]))\n label_list = list(set(label_list))\n length = len(label_list)\n labels = \";\".join(label_list)\n query = r\"update new_magic_quadrants set Vert_label = '%s' where MQID = '%s'\"%(labels, mq_id)\n local_sql.excute(query)\n booth_sql.excute(query)", "def setLabelInfo(self, label, strInfo) -> None:\n ...", "def label(self, name):\n self.labels[name] = self.node\n return self", "def set_labels(self,label:dict):\n self.label_dict = label\n print(\"[INFO] Label dictionary : \",label)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove all duplicate data with specific text.
def removeDuplicateText(self, text): self.getDataset().delete_many({"full_text": text})
[ "def delete_common_words(data):", "def delete_repeats(text):\n modified_text = ''\n for letter in text:\n for symbol in modified_text:\n if symbol == letter:\n break\n else:\n modified_text += letter\n return modified_text", "def remove_duplicates(self):\n for data_list in self.converted_data:\n for index, item in enumerate(data_list):\n if index != 0:\n date1 = data_list[index - 1][0]\n date2 = data_list[index][0]\n # If dates of two entries in a row are the same\n if date1 == date2:\n # Deleting one of them\n data_list.pop(index)", "def remove_duplicates():\n titles = set()\n click.echo('articles (before): {}'.format(Article.query.count()))\n for a in Article.query.all():\n if a.title in titles:\n db.session.delete(a)\n titles.add(a.title)\n db.session.commit()\n click.echo('articles (after): {}'.format(Article.query.count()))", "def remove_duplicate_titles(page, responses):\n ptitle = find_key(\"title\", page)\n\n for name,item in page:\n if name == \"question\" and validation.would_be_displayed(item, responses):\n if item.get(\"title\") == ptitle:\n del item[\"title\"]\n break", "def find_unique(self):\n self.unique_data = self.data.drop_duplicates(subset=\"find_unique\")", "def rm_duplicates(self):\n # NOTE: this implementation works only for a numerical\n # `ExpressionValue`, in case this column is something other than\n # numeric (i.e. a string), this function has to be overwritten by the\n # subclasses\n src_table = self.get_cur_tmp_table()\n dst_table = self.next_tmp_table(\"no_dupl\")\n # TODO: count how many are removed, and log somewhere\n sqlquery = ('SELECT Gene, Type, '\n 'MAX(ExpressionValue) AS ExpressionValue '\n 'FROM ' + src_table + ' '\n 'GROUP BY Gene, Type')\n sql.new_table_from_query(dst_table, sqlquery, self.sql_conn)", "def deduplicate(json,attr=\"neoTitle\") :\n nd=[]\n already=set()\n before=len(json[\"response\"][\"docs\"])\n if not callable(attr) :\n def compare(doc) :\n try :\n return doc[attr]\n except Exception as er :\n return er\n else :\n compare=attr\n for d in json[\"response\"][\"docs\"] :\n da=compare(d)\n if da not in already :\n already.add(da)\n nd.append(d)\n json[\"response\"][\"docs\"]=nd\n logging.debug(\"deduplicated %s ->%s entries\" % (before,len(nd)))\n return json", "def clean(text, config=get_config(include_project_config=False)):\n return remove_elements(text, config)", "def drop_tweetsduplicates(self):\n dates = self.tweets[\"date\"].unique()\n # define a dataframe which will contain the cleaned tweets\n clean_df1 = pd.DataFrame(columns=[\"date\", \"text\"])\n for d in dates:\n # for each day we drop all the duplicated tweets\n df_ = self.tweets[self.tweets[\"date\"] == d]\n # append the slice of cleaned tweets for the dat d in the the clean dataframe\n clean_df1 = clean_df1.append(self.tweets_sim(df_))\n return clean_df1", "def remove_dupes(reviews):\n \n if(len(reviews) == 0): \n return(reviews)\n\n review_names = [r.artist() + \" - \" + r.album() for r in reviews]\n found_dupe = True\n while found_dupe:\n for i in range(len(reviews)):\n if review_names.count(review_names[i]) > 1:\n review_names.pop(i)\n reviews.pop(i)\n break\n if i == max(range(len(reviews))):\n found_dupe = False\n return(reviews)", "def filter_unique(inp='../data/vrex_1week_long_text_filter_by_re.queries',\n outp='../data/vrex_1week_long_text_filter_unique.queries'):\n with open(inp) as f:\n with open(outp, 'wb') as fout:\n uniq_lines = OrderedDict()\n for i, aline in enumerate(f):\n txt = aline.decode('utf8')\n if i % 10000 == 0:\n print(i)\n if not 
uniq_lines.get(txt):\n uniq_lines[txt] = 1\n else:\n uniq_lines[txt] += 1\n for i, uqlines in enumerate(uniq_lines):\n fout.write(str(i) + '\\t' + uqlines.strip().encode('utf8') + '\\t' + str(uniq_lines[uqlines]) + '\\n')\n fout.flush()", "def exclude(text):\n\n unwanted = [\"\"\"\"\"\"]", "def removeHashTag(text=[]):\n\tcleanedText = []\n\tfor word in text:\n\t\tword = word.replace(\"#\",\"\")\n\t\tcleanedText.append(word)\n\treturn cleanedText", "def __drop_redundancy(df,col,txt): # @vaihauWILIAMU\n df[col] = pd.Series([ ''.join(elt.split(txt)) for elt in df[col].tolist() ])\n return df", "def remove_duplicates(file):\n start_time = time.time()\n articles = set() #Create a set object.\n lines_removed = 0\n with open(file, 'r') as fin:\n lines = fin.readlines() #Read in the lines of the file.\n with open(file, 'w') as fout:\n for line in lines:\n n = len(articles)\n articles.add(line) #Try to add each line to the set.\n if len(articles) != n:\n fout.write(line) #If the line was added, write it to the file.\n else:\n lines_removed += 1 #Otherwise increment the count of lines removed.\n print(lines_removed, \"duplicate lines removed in\", time.time()-start_time, \"seconds.\")\n return lines_removed", "def remove_duplicates_and_sort_tags(self):\n self.tags = list(set(self.tags))\n self.sort_tags()", "def remove_dup():\n matched_items = {}\n new_items = []\n doc = bson.json_util.loads(open(FILE_LOCATION).read())\n for entry in sorted(doc, key=lambda x: (x.get(\"letter\"), int(x.get(\"number\")))):\n key = (entry.get(\"letter\"), int(entry.get(\"number\")))\n if key not in matched_items:\n new_items.append(entry)\n matched_items[key] = 1\n with open('courses.sorted.json', 'w') as outfile:\n json.dump(new_items, outfile, sort_keys=True, indent=4, separators=(',', ': '))", "def remove_duplicate_rows(self):\n\t\tresult = []\n\t\tfor row in self.__rows:\n\t\t\tif row in result: continue\n\t\t\tresult.append(row)\n\t\tself.__rows = result\n\n\t\tself.__row_size = len(self.__rows)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this method return the nodeList of the the node without any user
def nodeList_without_user(self):
    nlwu = []
    for n in self.nodeList:
        if type(n[0]) is Node:
            nlwu.append(n)
    return nlwu
[ "def getNodeList(self):\n nodelist = self.response[1] or []\n return [Node(*nodeple) for nodeple in nodelist]", "def get_nodes(self):\n nodes = []\n for node in self.nodes:\n nodes.append(node)\n return nodes", "def get_nodes(self) :\n n = []\n self._gather_nodes(self.root,n) \n return n", "def getChildNodes(self):\n return NodeList()", "def other_nodes(self):\n return [node for node in self.nodes if not node.is_role('shadow')]", "def get_all_nodes(self):\n\n return list(self.graph.keys())", "def get_nodes(self):\n\t\treturn node_names(self.network)", "def get_blanknodes(nodes):\n\n blanknodes = []\n for node in nodes:\n if is_blanknode(node):\n blanknodes.append(node)\n # print(\"**: %s\" % node)\n # else:\n # print(\"node: %s\" % node)\n return blanknodes", "def getActiveNodes(self):\n\t\treturn self.activeNodes", "def _available_nodes(self):\n list_ni = []\n list_degree = []\n for ni in self.graph.nodes():\n # \"unvisited degree\" for each node is the total number of edges\n # minus the number of 'visited' edges\n degree_ni = self.graph.degree(ni) - self.graph.degree(ni, weight='visited')\n if degree_ni > 0:\n list_ni.append(ni)\n list_degree.append(degree_ni)\n\n return list_ni, list_degree", "def get_nodes(self, role, is_role, nodes=None):\n if role not in self.VALID_ROLES:\n return []\n nodes = nodes or self.nodes\n return [node for node in nodes if node.is_role(role) == is_role]", "def getNetworkTails(self):\n nodesWithNoConnectedInput = []\n\n for node in self.nodes:\n if not node.isConnected():\n nodesWithNoConnectedInput.append(node)\n else:\n connected = False\n for port in node.portsIn:\n if port.isConnected():\n connected = True\n if not connected:\n nodesWithNoConnectedInput.append(node)\n return nodesWithNoConnectedInput", "def pick_nodes(self):\n if self.nodes == []:\n return []\n return self.nodes\n # return sample(self.nodes,1)", "def node_ids(self):\n try:\n return self._node_ids()\n except:\n #raise\n raise RuntimeError('error processing nodes for \\n%s' % str(self))", "def nodes_iter(self):\n\n return self.nodes()", "def get_alive_nodes(self):\n return [node for node in self[0:-1] if node.alive]", "def nodes(self):\n return self.nodes_spanned()", "def nodes(self):\n return get_objects_for_group(self.member_group, READ_NODE, Node)", "def nodes(self):\n # return [k for k in self.agents]\n return self.name_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this method return the attached user of a node if it has one
def attached_user(self):
    for n in self.nodeList:
        if type(n[0]) is User:
            return n
    return None
[ "def user(self):\r\n try:\r\n return User.objects.get(username=self.username)\r\n except User.DoesNotExist:\r\n return None", "def get_current_user(self):\n return self.graph.users.get(int(self.get_secure_cookie('eid')))", "def user(self) -> Optional[str]:\n if self.logged_in():\n return self.username()\n return None", "def _get_createdBy(self) -> \"adsk::core::Ptr< adsk::core::User >\" :\n return _core.DataFile__get_createdBy(self)", "def get_single_user():", "def user(self) -> Optional[dict]:\n return self._get('user')", "def user(self) -> Optional[str]:\n\n if header := self.data.get(\"User\"):\n return header.name\n return None", "def LastPostUser(self):\n\n\t\tlast_post = forum_post.objects.filter(thread = self).order_by('-date_created')\n\t\tif len(last_post) == 0:\n\t\t\treturn 'Nobody'\n\t\t#End\n\n\t\treturn last_post[0].user", "def active_user():\n return getattr(_request_ctx_stack.top, 'user', None)", "def getUser(self):\n users = User.objects.filter(username__iexact=self.trigramme)\n if users.count() >= 1:\n return users[0]", "def GetUser(self, guid):\n self.usrLock.acquire()\n \n user = None\n for candidate in self.users: #Match the user\n if candidate.guid == guid:\n user = candidate\n break\n \n self.usrLock.release()\n return user", "def user(self):\n return self.parsed_prefix.user", "def get(uid):\n if uid in User.users:\n return User.users[uid]\n return None", "def getAttachedNode(self) -> \"SoNode *\":\n return _coin.SoNodeSensor_getAttachedNode(self)", "def LastPostUser(self):\n\n\t\tlast_thread = forum_thread.objects.filter(forum = self).order_by('-date_created')\n\t\tif len(last_thread) == 0:\n\t\t\treturn 'Nobody'\n\t\t#End\n\t\tlast_post = forum_post.objects.filter(thread = last_thread[0]).order_by('-date_created')\n\t\tif len(last_post) == 0:\n\t\t\treturn 'Nobody'\n\t\t#End\n\n\t\treturn last_post[0].user", "def get_user(self, socket):\r\n\t\treturn self.users.get(socket, None)", "def get_user(self, nickname):\n \n return self.users[nickname][0]", "def snmp_user(self):\n return self._snmp_user", "def get_owner(self):\n if self.user is None:\n self.user = self.gh.get_user()\n if self.org is None:\n return self.user\n if self.owner is None:\n try:\n self.owner = [org for org in self.user.get_orgs() \\\n if org.login.lower() == self.org.lower()][0]\n except Exception as e:\n raise BaseException(\"Could not find organization '\" + str(self.org) + \\\n \"' because: \" + str(e))\n\n return self.owner" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this method return the nodeList of the the node without any user and sorted by increasing idNode
def arrange_node(self):
    idList = []
    arrangedNode = []
    for n in self.nodeList_without_user():
        idList.append(n[0].idNode)
    idList.sort()
    for i in idList:
        for n in self.nodeList:
            if i == n[0].idNode:
                arrangedNode.append(n)
    return arrangedNode
[ "def nodeList_without_user(self):\r\n nlwu = []\r\n for n in self.nodeList:\r\n if type(n[0]) is Node:\r\n nlwu.append(n)\r\n return nlwu", "def get_nodes(self) :\n n = []\n self._gather_nodes(self.root,n) \n return n", "def get_nodes(self):\n nodes = []\n for node in self.nodes:\n nodes.append(node)\n return nodes", "def get_nodes(graph):\r\n nodes = []\r\n for i in graph:\r\n for j in i:\r\n if j not in nodes:\r\n nodes.append(j)\r\n nodes.sort()\r\n return nodes", "def node_ids(self):\n try:\n return self._node_ids()\n except:\n #raise\n raise RuntimeError('error processing nodes for \\n%s' % str(self))", "def node_ids(self):\n return [self.ni_id, self.nj_id, self.nk_id, self.nl_id]", "def node_chain(self, node_id: int) -> List[Node]:\n pass", "def _getNodes(self):\n kids = []\n secs = [self]\n while secs:\n sec = secs.pop()\n for kid in sec._children:\n if isinstance(kid, Section):\n secs.append(kid)\n kids.append(kid)\n return kids", "def pick_nodes(self):\n if self.nodes == []:\n return []\n return self.nodes\n # return sample(self.nodes,1)", "def get_next_nodes(self, n):\n return # osid.hierarchy.Node", "def get_all_nodes(self):\n\n return list(self.graph.keys())", "def levelorder_i(cls,root):\n nodes = []\n q = []\n q.append(root)\n while len(q) > 0:\n node = q.pop(0) # bottom of q (ie fifo)\n nodes.append(node) \n if node.l is not None:q.append(node.l)\n if node.r is not None:q.append(node.r)\n pass\n return nodes", "def getNodeList(self):\n nodelist = self.response[1] or []\n return [Node(*nodeple) for nodeple in nodelist]", "def nodes(self):\n return self.nodes_spanned()", "def get_nodes(self):\n self.POS = set([x[0].name for x in self.edges])\n child_nodes = set([x[1].name for x in self.edges])\n self.tokens = child_nodes - self.POS", "def get_nodes(self):\n\t\treturn node_names(self.network)", "def get_selected_nodes(self,etreeElement,id_list):\n assert type(etreeElement) == etree.Element, \"etreeElement must be an etree.Element!\"\n assert type(id_list) == id_list, \"id_list must be a list of id's to search for\"\n nodes = []\n for id in id_list:\n path = '//*[@id=\"%s\"]' % id\n for node in etreeElement.xpath(path, namespaces=inkex.NSS):\n nodes.append(node)\n return nodes", "def _available_nodes(self):\n list_ni = []\n list_degree = []\n for ni in self.graph.nodes():\n # \"unvisited degree\" for each node is the total number of edges\n # minus the number of 'visited' edges\n degree_ni = self.graph.degree(ni) - self.graph.degree(ni, weight='visited')\n if degree_ni > 0:\n list_ni.append(ni)\n list_degree.append(degree_ni)\n\n return list_ni, list_degree", "def nodeIdList(treeContent):\n list = []\n for node in treeContent:\n list.append(int(node['nodeId']))\n return list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this method takes a target node id in parameter and return the time to get to the target node from the node this method is recursive and can also be called by the time_to_node method in the user class
def time_to_node(self, target_id_node, visited_nodes=[], nodes_to_visit=[], time=0, max_time=None):
    if not len(nodes_to_visit) == 0:
        del nodes_to_visit[0]
    if self.idNode == target_id_node:
        if max_time == None:
            max_time = time
        elif time < max_time:
            max_time = time
    visited_nodes.append(self)
    for n in self.nodeList_without_user():
        if not n[0] in visited_nodes:
            nodes_to_visit.append([n[0], time + n[1]])
    if len(nodes_to_visit) == 0:
        return max_time
    return nodes_to_visit[0][0].time_to_node(target_id_node, visited_nodes, nodes_to_visit, nodes_to_visit[0][1], max_time)
[ "def travel_time(data, from_node, to_node):\n if from_node == to_node:\n travel_time = 0\n else:\n travel_time = distance(\n data.locations[from_node],\n data.locations[to_node]) / data.vehicle.speed\n return travel_time", "def time_callback(from_node, to_node):\r\n serv_time = service_time(from_node)\r\n trav_time = travel_time(from_node, to_node)\r\n return serv_time + trav_time", "def _find_node(self, node, target):\n if node is None:\n return None\n if node._data == target:\n return node\n if target < node._data:\n return self._find_node(node._left, target)\n else: # so target > node.data\n return self._find_node(node._right, target)", "def calculate_time_to(self, node, speed):\n distance = self.calculate_distance_to(node)\n\n time = distance / speed\n\n return time", "def execute_node1(self, node_id):\n self.nodes[node_id]['counter'] += 1\n self.nodes[node_id]['time'] -= time.time()", "def NodeId(self) -> int:", "def get_current_temporal_node(self):\n raise NotImplementedError", "def select_node(self, node_id: int) -> Node:", "def _get_node(self, id_):\n return self._nodes[id_]", "def _get_time_from_id(self) -> datetime:\n return datetime.fromtimestamp(int(self.id.split(' ')[0]) / 1e3)", "def target_info(self, id):\n for target in self.target:\n if target['target_id'] == id:\n return target", "def get_twin(self, node):\n assert node in self.target_nodes\n return self.nodes[self.number_of_nodes+node]", "def shortest_path(source, target):\n ##for testing\n # source=person_id_for_name(\"Lupita Nyong'o\")\n # target=person_id_for_name(\"Joan Cusack\")\n ## \n explored=[]\n frontier=QueueFrontier()\n init_state=Node(state=source,parent=None,action=None)\n frontier.add(init_state)\n success=None\n while frontier.empty ==False or success is None:\n if frontier.contains_state(target) == True:\n success=frontier.search(target)\n print(\"success\")\n else:\n removal=frontier.remove_all()\n for node in removal:\n for i in neighbors_for_person(node.state):\n n=Node(i[1],node.state,i[0])\n if any(node==n for node in explored)==False and\\\n frontier.contains_state(n.state)==False:\n frontier.add(n)\n explored.append(node)\n removal.clear()\n if frontier.empty==True:\n return None\n elif success.parent==source:\n return [(success.action,success.state)]\n else:\n movie_path=[(success.action,success.state)]\n node_path=[success]\n while node_path[0].parent != source:\n p_node=search_for_parent(explored,node_path[0].parent) \n movie_path.insert(0,(p_node.action,p_node.state))\n node_path.insert(0,p_node)\n return movie_path", "def node_start_time(self):\n return self._node_start_time", "def getTargetID(self) -> int:\n ...", "def find_shortest_path(self, start_id, target_id):\n if not self.contains_id(start_id) or not self.contains_id(target_id):\n raise KeyError(\"One or both vertices are not in the graph!\")\n\n # vertex keys we've seen before and their paths from the start vertex\n vertex_id_to_path = {\n start_id: [start_id] # only one thing in the path\n }\n\n # queue of vertices to visit next\n queue = deque() \n queue.append(self.get_vertex(start_id))\n\n # while queue is not empty\n while queue:\n current_vertex_obj = queue.popleft() # vertex obj to visit next\n current_vertex_id = current_vertex_obj.get_id()\n\n # found target, can stop the loop early\n if current_vertex_id == target_id:\n break\n\n neighbors = current_vertex_obj.get_neighbors()\n for neighbor in neighbors:\n if neighbor.get_id() not in vertex_id_to_path:\n current_path = vertex_id_to_path[current_vertex_id]\n # extend the 
path by 1 vertex\n next_path = current_path + [neighbor.get_id()]\n vertex_id_to_path[neighbor.get_id()] = next_path\n queue.append(neighbor)\n # print(vertex_id_to_path)\n\n if target_id not in vertex_id_to_path: # path not found\n return None\n\n return vertex_id_to_path[target_id]", "def get_libgen_translate_script_execution_time_of_target_node(target_node_name,wo_omu_node_name):\n time_index = 'Libgen_translate.py::FULL_TRANSLATE:'\n command1='grep %s /var/log/master/syslog |grep %s'%(time_index,target_node_name)\n command2='grep %s /tmp/copyconfig.out |grep %s'%(time_index,target_node_name)\n\n cmd = 'echo $HW_PLATFORM'\n hw_type = connections.execute_mml_without_check(cmd)\n hw_type = hw_type.strip()\n index = target_node_name.split('-')[0].strip()\n \n if (wo_omu_node_name.count(index) == 0 and hw_type == 'ATCA'):\n BuiltIn().run_keyword(\"Change Node Env\", target_node_name)\n out = connections.execute_mml_without_check(command2)\n else:\n BuiltIn().run_keyword(\"Change Node Env\", wo_omu_node_name)\n out = connections.execute_mml_without_check(command1)", "def getOriginalTravelTime(edgeId: str):\n return traci.edge.getTraveltime(edgeId)", "def find_spanning_tree_path(self, from_node, to_node):\r\n # Follow the tree's links back from to_node to from_node.\r\n path_nodes = []\r\n path_links = []\r\n current_node = to_node\r\n while current_node != from_node:\r\n # Add this node to the path.\r\n path_nodes.append(current_node)\r\n\r\n # Find the previous node.\r\n prev_node = current_node.from_node\r\n\r\n # Find the link that leads to current_node.\r\n prev_link = None\r\n for link in prev_node.links:\r\n if link.node1 == current_node:\r\n prev_link = link\r\n break\r\n\r\n # Make sure we found the link.\r\n assert prev_link != None\r\n\r\n # Add the link to the path.\r\n path_links.append(prev_link)\r\n\r\n # Move to the next node.\r\n current_node = prev_node\r\n\r\n # Add the start node.\r\n path_nodes.append(from_node)\r\n\r\n # Reverse the order of the nodes and links.\r\n path_nodes.reverse()\r\n path_links.reverse()\r\n\r\n # Unmark all nodes and links.\r\n self.deselect_nodes()\r\n self.deselect_links()\r\n\r\n # Marks the path's nodes and links.\r\n for node in path_nodes:\r\n node.visited = True\r\n for link in path_links:\r\n link.visited = True\r\n\r\n # Calculate the cost of the path.\r\n cost = 0\r\n for link in path_links:\r\n cost += link.cost\r\n\r\n # Return the cost.\r\n return cost, path_nodes, path_links" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function converts a value to an URL parameter compatible with the NHL API.
def to_url_param(val):
    if isinstance(val, IUrlParam):
        return val.to_url_param()
    if isinstance(val, (date, datetime)):
        return val.strftime("%Y-%m-%d")
    if isinstance(val, (list, tuple)):
        return ",".join(map(to_url_param, val))
    if isinstance(val, int):
        return str(val)
    if isinstance(val, str):
        return val
    else:
        raise TypeError("Cannot convert '{}' to url param".format(type(val)))
[ "def query_param(self, key, value=None, default=None, as_list=False):\r\n parse_result = self.query_params()\r\n if value is not None:\r\n parse_result[key] = value\r\n return URL._mutate(\r\n self, query=unicode_urlencode(parse_result, doseq=True))\r\n\r\n try:\r\n result = parse_result[key]\r\n except KeyError:\r\n return default\r\n if as_list:\r\n return result\r\n return result[0] if len(result) == 1 else result", "def writeUrlValue(self, value):\n raise NotImplementedError(\"TODO. Need type check.\")", "def change_parameter(url, parameter, new_value):\n url_query = urlparse(url).query\n query = dict(parse_qsl(url_query))\n\n if query:\n for name, _ in query.items():\n if name == parameter:\n query[name] = new_value\n\n encoded = \"?\" + urlencode(query)\n return urljoin(url, encoded)\n else:\n return url", "def label2Param(cls, label):\n return urllib.quote_plus(label)", "def add_or_replace_parameter(url, name, new_value):\n parsed = urlsplit(url)\n args = parse_qsl(parsed.quhttps keep_blank_values=True)\n\n new_args = []\n found = False\n for name_, value_ in args:\n if name_ == name:\n new_args.append((name_, new_value))\n found = True\n else:\n new_args.append((name_, value_))\n\n if not found:\n new_args.append((name, new_value))\n\n query = urlencode(new_args)\n return urlunsplit(parsed._replace(query=query))", "def _append_id_name(self, url, value):\n return '{}/id/{}'.format(url, value) if self._is_int(value) else '{}/name/{}'.format(url, value)", "def _params(self, params):\r\n return urllib.urlencode(params)", "def make_url(name_or_url):\n\n if isinstance(name_or_url, basestring):\n return _parse_rfc1738_args(name_or_url)\n else:\n return name_or_url", "def override_query(context, key, value):\n request = context['request']\n current_q = request.GET.copy()\n current_q.__setitem__(key, value)\n return current_q.urlencode()", "def construct_url(context, request):", "def build_url(*args, **kwargs):\n get = kwargs.pop(\"get\", {})\n pk = kwargs.pop(\"pk\", {})\n url = reverse(*args, kwargs=pk)\n if get:\n url += \"?\" + urlencode(get)\n return url", "def build_url(self, base_url, params):\r\n url_substr_list = [base_url, '?key=', self.api_key]\r\n for param, value in params.iteritems():\r\n encoded_value = urllib.quote(str(value))\r\n url_substr_list.append('&')\r\n url_substr_list.extend([param, '=', encoded_value])\r\n return ''.join(url_substr_list)", "def _to_url(self):\n url = self._target_url\n\n params = collections.defaultdict(list, copy.deepcopy(self._filters))\n if self._order_by is not None:\n params[\"sort\"] = self._order_by\n for k, vl in list(self._extra.items()):\n params[k] += vl\n\n if params:\n url += \"?\" + urllib.parse.urlencode(params, doseq=True)\n\n return url", "def buildParameter(self, name, value, type):\n # type: (str, str, int) -> IParameter", "def create_url(self, URL):\r\n return '{0}{1}'.format(self.url, URL)", "def build(self, ):\n url = self.BASE_URL\n params = {}\n for name in self.parameters:\n if isinstance(self.parameters[name], (tuple, list)):\n params[name] = ','.join(self.parameters[name])\n continue\n params[name] = self.parameters[name]\n url = url + '/' + self.VERSION\n url = url + '/' + self._request_type\n url = url + '?'\n url = url + urlparse.urlencode(params)\n return url", "def _construct_url(self, interface, suffix=''):\n # %22 is the encoding for double quotes (\") in urls.\n # % escapes the % character.\n # Double quotes are necessary in the url because switch ports contain\n # forward slashes (/), ex. 
101/0/10 is encoded as \"101/0/10\".\n return '%(hostname)s/rest/config/running/interface/' \\\n '%(interface_type)s/%%22%(interface)s%%22%(suffix)s' \\\n % {\n 'hostname': self.hostname,\n 'interface_type': self.interface_type,\n 'interface': interface,\n 'suffix': '/switchport/%s' % suffix if suffix else ''\n }", "def _encode_param(self, param):\n _param = copy.deepcopy(param)\n keys_needed_pop = ['sign', 'sign_type']\n\n for key in keys_needed_pop:\n if key in _param:\n _param.pop(key)\n\n sorted_items = sorted(_param.items())\n result = '&'.join(['{}={}'.format(key, value) for key, value in sorted_items if value != ''])\n\n return result", "def build_uri_with_params(uri, param_map):\n return ''.join([\n uri,\n '?',\n '&'.join(['%s=%s' % (k, v) for (k, v) in param_map.iteritems()])\n ])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the org_no of this AccountListItem.
def org_no(self): return self._org_no
[ "def external_organization_id(self):\n return self._external_organization_id", "def pid_organization_id(self) -> str:\n return pulumi.get(self, \"pid_organization_id\")", "def getAccountNumber(self):\n return self._acctNo", "def account_number(self) -> int:\n if self._account_number == 0:\n self._account_number = self.info().account_number\n return self._account_number", "def getAccountNum(self):\n p = Selector(text=self.content).xpath('//*[@id=\"MainContent_lblAcctNum\"]/text()')\n try:\n accountnum = p.extract()[0]\n except IndexError:\n print(\"No Account Number information is available for %s\" % self.getLocation())\n return \"\"\n return accountnum", "def organization(self):\n return self._get_prop_value(self._ORGANIZATION_KEY)", "def parent_pid_organization_id(self) -> str:\n return pulumi.get(self, \"parent_pid_organization_id\")", "def org_id(self, value):\n if isinstance(value, str):\n self._org_id = value\n else:\n raise ValueError(\"org_id must be a string\")", "def get_active_organization(self):\n return self._active_organization", "def get_org(self):\n return self._get_wk_resource(_WellKnownEndpoint.LOGGED_IN_ORG)", "def org(self):\n return self.request.app.org", "def nlc_id(self):\n return self._nlc_id", "def account_id(self):\n\n return self._account_id.value", "def list_org_names(self):\n return self.org_names", "def issue_number(self):\n return self._get('issueNumber')", "def get_current_org(self, session):\n org = None\n current_org_id = session.get(\"current_org_id\") or None\n try:\n if self.is_staff:\n if current_org_id:\n org = Organisation.objects.get(pk=current_org_id)\n else:\n org = Organisation.objects.first()\n else:\n current_org_ms = None\n if current_org_id:\n current_org_ms = OrgMembership.objects.filter(\n user=self,\n org_id=current_org_id\n ).first()\n if not current_org_ms:\n current_org_ms = OrgMembership.objects.filter(\n user=self,\n ).first()\n if current_org_ms:\n org = current_org_ms.org\n except Exception as e:\n logger.exception(e)\n org = None\n return org", "def get_position_number(self):\n if self.ascender_data and 'position_no' in self.ascender_data and self.ascender_data['position_no']:\n return self.ascender_data['position_no']\n return ''", "def get_active_organization_key(self):\n return self._active_organization.key()", "def get_owner(self):\n if self.user is None:\n self.user = self.gh.get_user()\n if self.org is None:\n return self.user\n if self.owner is None:\n try:\n self.owner = [org for org in self.user.get_orgs() \\\n if org.login.lower() == self.org.lower()][0]\n except Exception as e:\n raise BaseException(\"Could not find organization '\" + str(self.org) + \\\n \"' because: \" + str(e))\n\n return self.owner", "def organization_role_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"organization_role_name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the org_no of this AccountListItem.
def org_no(self, org_no): self._org_no = org_no
[ "def org_id(self, value):\n if isinstance(value, str):\n self._org_id = value\n else:\n raise ValueError(\"org_id must be a string\")", "def external_org_id(self, external_org_id):\n\n self._external_org_id = external_org_id", "def org_name(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"Org_name must be a string\")\n self._org_name = value", "def set_organization_id(self, organization_id):\n raise NotImplementedError", "def external_organization_id(self, external_organization_id):\n \n self._external_organization_id = external_organization_id", "def csr_org_name(self, csr_org_name):\n\n self._csr_org_name = csr_org_name", "def update_org(self, org_info):\n current_app.logger.debug('<update_org ')\n\n existing_similar__org = OrgModel.find_similar_org_by_name(org_info['name'])\n if existing_similar__org is not None:\n raise BusinessException(Error.DATA_CONFLICT, None)\n\n self._model.update_org_from_dict(camelback2snake(org_info))\n current_app.logger.debug('>update_org ')\n return self", "def code_no(self, code_no):\n\n self._code_no = code_no", "def set_year(self, year):\n self.year = int(year) if year else None", "def csr_org_unit(self, csr_org_unit):\n\n self._csr_org_unit = csr_org_unit", "def organization_enable(self, organization_enable):\n\n self._organization_enable = organization_enable", "def external_organization_id(self):\n return self._external_organization_id", "def mod_year(self, mod_year: int):\n\n self._mod_year = mod_year", "def organization_disable(self, organization_disable):\n\n self._organization_disable = organization_disable", "def set_accountcode(self, event):\n if not self._callerid.code:\n old_accountcode = self._callerid.code\n self._callerid = self._callerid.replace(code=int(event['AccountCode']))\n self._trace('set_accountcode {} -> {}'.format(old_accountcode, self._callerid.code))\n else:\n self._trace('set_accountcode ignored {} -> {}'.format(self._callerid.code, event['AccountCode']))", "def pid_organization_id(self) -> str:\n return pulumi.get(self, \"pid_organization_id\")", "def set_active_organization(self, new_organization):\n user_type = self._active_user.user_type\n successful = False\n if (user_type.change_active_organization):\n try:\n if (new_organization.kind() == \"Organization\"):\n self._active_organization = new_organization\n successful = True\n except:\n pass\n return successful", "def account_number(self) -> int:\n if self._account_number == 0:\n self._account_number = self.info().account_number\n return self._account_number", "def on_put(self, req, resp, orgid):\n mapper = self.meta.get('mapper')\n o = mapper.organization.Organization.get_by_uid(orgid)\n if o is None:\n raise falcon.HTTPInvalidParam('Organization not found', 'orgid')\n \n body = req.context['body']\n # look for changes to name, description, status, parameters, and data\n if 'name' in body:\n o.set_name(body['name'].strip())\n if 'status' in body:\n o.set_status(body['status'].strip())\n if 'custom_data' in body and isinstance(body['custom_data'], dict):\n o.set_custom_data(body['custom_data'])\n if 'data' in body and isinstance(body['data'], list):\n # body['data'] = [{'key': 'spam', 'value': 'eggs'}, ...]\n o.set_data(body['data'])\n \n o = mapper.organization.Organization.update_from_object(o)\n resp.body = {\"organization\": o.to_dict()}\n return True", "def year(self, new_year_value):\n if new_year_value < 0:\n raise ActivityValidatorError(\"Year cannot be negative! (or could it!? 
xD)\\n\")\n self.__date[\"year\"] = new_year_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the uni_customer_no of this AccountListItem.
def uni_customer_no(self): return self._uni_customer_no
[ "def customer_id(self):\n if \"customerId\" in self._prop_dict:\n return self._prop_dict[\"customerId\"]\n else:\n return None", "def customer_code(self) -> str:\n return self._customer_code", "def customer_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"customer_id\")", "def getAccountNumber(self):\n return self._acctNo", "def account_number(self) -> int:\n if self._account_number == 0:\n self._account_number = self.info().account_number\n return self._account_number", "def customer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"customer_id\")", "def getAccountNum(self):\n p = Selector(text=self.content).xpath('//*[@id=\"MainContent_lblAcctNum\"]/text()')\n try:\n accountnum = p.extract()[0]\n except IndexError:\n print(\"No Account Number information is available for %s\" % self.getLocation())\n return \"\"\n return accountnum", "def get_customer(self):\n if self.customer_id:\n return self.client.customers.get(self.customer_id)", "def account_id(self):\n\n return self._account_id.value", "def debit_account_uid(self):\n if self.is_null():\n return None\n else:\n return self._debit_account_uid", "def nicid(self):\n return self.data.get('nicid')", "def getCustomerByFiducialNumber(self, num):\n for customer in self.getCustomers(): \n if customer.getFiducialNum() == num: \n return customer", "def nlc_id(self):\n return self._nlc_id", "def uid(self):\n ret = self._get_attr(\"UID\")\n return ret", "def get_user_account_id(self):\n return self.response_json[\"account\"][\"id\"]", "def serial_number(self) -> str:\n return self._serial_number", "def external_contact_id(self):\n return self._external_contact_id", "def get_customer_id(token: str = None) -> str:\n cid = current_user.customer_id\n if not cid and token:\n cid = stripe.Customer.create(\n email=current_user.email,\n source=token\n ).id\n current_user.customer_id = cid\n db.session.commit()\n return cid", "def get_numero(self):\r\n return self.__numero", "def account_id(self): # DG: renamed\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the uni_customer_no of this AccountListItem.
def uni_customer_no(self, uni_customer_no): self._uni_customer_no = uni_customer_no
[ "def customer_code(self, customer_code: str):\n\n self._customer_code = customer_code", "def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number", "def next_customer_number(self, next_customer_number):\n\n self._next_customer_number = next_customer_number", "def channel_customer_no(self, channel_customer_no):\n if (self.local_vars_configuration.client_side_validation and\n channel_customer_no is not None and len(channel_customer_no) > 50):\n raise ValueError(\"Invalid value for `channel_customer_no`, length must be less than or equal to `50`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_customer_no is not None and len(channel_customer_no) < 0):\n raise ValueError(\"Invalid value for `channel_customer_no`, length must be greater than or equal to `0`\") # noqa: E501\n\n self._channel_customer_no = channel_customer_no", "def customer_code(self) -> str:\n return self._customer_code", "def set_fumi_number(self, number):\n self._cp['fumi_number'] = number", "def customer_id(self):\n if \"customerId\" in self._prop_dict:\n return self._prop_dict[\"customerId\"]\n else:\n return None", "def set_number_served(self,customers):\n self.number_served = customers", "def set_delivery_customer(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_DELIVERY_CUSTOMER).\n send_keys(value))", "def code_no(self, code_no):\n\n self._code_no = code_no", "def account_number(self) -> int:\n if self._account_number == 0:\n self._account_number = self.info().account_number\n return self._account_number", "def team_customer_permission(self, team_customer_permission):\n\n self._team_customer_permission = team_customer_permission", "def useridentifier(self, useridentifier):\n self._useridentifier = useridentifier", "def is_customer_notified(self, is_customer_notified):\n if is_customer_notified is None:\n raise ValueError(\"Invalid value for `is_customer_notified`, must not be `None`\")\n\n self._is_customer_notified = is_customer_notified", "def customer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"customer_id\")", "def customer_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"customer_id\")", "def remove_customer(self, index):\n self.customer_list.pop(index)", "def serial_number(self, value):\n\n self._serial_number.set(value)", "def addToLine(self, customer):\r\n self.customers.put(customer)\r\n self.total_items += customer.cartSize", "def getAccountNumber(self):\n return self._acctNo" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the last_modified of this AccountListItem.
def last_modified(self): return self._last_modified
[ "def last_modified(self):\n return self.metadata.last_modified", "def last_modified_at(self) -> \"datetime\":\n return self._attrs.get(\"last_modified_at\")", "def getlastmodified(self):\n t = calendar.timegm(time.gmtime(self.st_mtime))\n return DAVElement.getlastmodified(\n formatdate(t, localtime=False, usegmt=True) )", "def last_modified(self):\n return remote_to_local_datetime(self.last_modified_string)", "def last_modified(self) -> datetime:\n return self.workspace.get_last_modified(self.file)", "def lastmodifieddate(self):\n return datetime.utcfromtimestamp(\n self.st_mtime)", "def last_modified(self):\n if self.modified:\n return self.modified\n \n latest = never\n for t in self.__items:\n if t.modified > latest:\n latest = t.modified\n \n self.modified = latest\n return self.modified", "def LastModifiedTime(self):\n\t\treturn os.path.getmtime( self.FilePath )", "def last_modified_by(self) -> \"str\":\n return self._attrs.get(\"last_modified_by\")", "def lastmodified(self):\n t = calendar.timegm(time.gmtime(self.st_mtime))\n return formatdate(t, localtime=True, usegmt=True)", "def get_last_modified(self, product):\n timestamp = product.get('last_modified_t')\n return datetime.date.fromtimestamp(timestamp)", "def _get_lastUpdatedBy(self) -> \"adsk::core::Ptr< adsk::core::User >\" :\n return _core.DataFile__get_lastUpdatedBy(self)", "def last_edit_timestamp(self):\n return self._last_edit_timestamp", "def last_modified_at(self, last_modified_at: \"datetime\"):\n self._attrs[\"last_modified_at\"] = last_modified_at", "def modification_time(self):\n ret = self._get_attr(\"modificationTime\")\n return ret", "def last_modified(resourcelist):\n lastmod = None\n for resource in resourcelist:\n rlm = resource.lastmod\n if rlm > lastmod:\n lastmod = rlm\n\n return lastmod", "def modified_since(self):\n return self._modified_since", "def last_plan_modified_date(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_plan_modified_date\")", "def last_modified(self):\n return self.session.query(func.max(SwissVote.last_change)).scalar()", "def max_last_modified_timestamp(self) -> datetime:\n return self.postgresql_wrapper.execute(MAX_LAST_MODIFIED_TIMESTAMP_SQL)[0][0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the last_modified of this AccountListItem.
def last_modified(self, last_modified): self._last_modified = last_modified
[ "def last_modified_at(self, last_modified_at: \"datetime\"):\n self._attrs[\"last_modified_at\"] = last_modified_at", "def date_modified_billing(self, date_modified_billing):\n\n self._date_modified_billing = date_modified_billing", "def last_modified_at(self) -> \"datetime\":\n return self._attrs.get(\"last_modified_at\")", "def lastmodifieddate(self):\n return datetime.utcfromtimestamp(\n self.st_mtime)", "def setModified(self):\n self.modified = True", "def set_last_update_time(self, time):\n self.last_updated = time", "def last_modified_by(self) -> \"str\":\n return self._attrs.get(\"last_modified_by\")", "def config_setting_last_modified(self, config_setting_last_modified):\n\n self._config_setting_last_modified = config_setting_last_modified", "def last_modified(self):\n return self.metadata.last_modified", "def last_created(self, last_created):\n\n self._last_created = last_created", "def setModified(self, modified=True):\n if modified != self.modified:\n self.modified = modified\n self.allActions['FileSave'].setEnabled(modified)\n self.resetAutoSave()\n pluginInterface = globalref.mainControl.pluginInterface\n if pluginInterface and modified:\n pluginInterface.execCallback(pluginInterface.fileModCallbacks)", "def last_user_at(self, value):\n self._last_user_at = value", "def edited_by(self, edited_by):\n\n self._edited_by = edited_by", "def last_modified(self):\n if self.modified:\n return self.modified\n \n latest = never\n for t in self.__items:\n if t.modified > latest:\n latest = t.modified\n \n self.modified = latest\n return self.modified", "def lastmodified(date_obj):\n web.header('Last-Modified', net.httpdate(date_obj))", "def task_last_modified(self, operator: Enum, task_last_modified: Arrow | datetime | int | str):\n task_last_modified = self.util.any_to_datetime(task_last_modified).strftime(\n '%Y-%m-%d %H:%M:%S'\n )\n self._tql.add_filter('taskLastModified', operator, task_last_modified, TqlType.STRING)", "def setModificationTime(self):\n path = self._path\n if path is not None and os.path.exists(path):\n try:\n # this may fail on some filesystems (e.g. SMB servers)\n os.utime(path, None)\n except OSError as e:\n logger.warning(\"Failed to set modified time: %s\", e)", "def last_changes(self, last_changes):\n\n self._last_changes = last_changes", "def last_modified(self) -> datetime:\n return self.workspace.get_last_modified(self.file)", "def last_modified(self):\n return remote_to_local_datetime(self.last_modified_string)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the dealer_id of this AccountListItem.
def dealer_id(self): return self._dealer_id
[ "def account_id(self):\n\n return self._account_id.value", "def get_ad_id(self):\n return self.get_item(conf.AD_ID_KEY)", "def get_user_account_id(self):\n return self.response_json[\"account\"][\"id\"]", "def get_account_id(self):\n return self.wepay_account_id", "def debit_account_uid(self):\n if self.is_null():\n return None\n else:\n return self._debit_account_uid", "def get_card_id(self):\n return self.card_id", "def item_id(self):\n return self.content[\"item_id\"]", "def account_id(self): # DG: renamed\n pass", "def item_id(self):\r\n return self.content['item_id']", "def get_id_receiver(self):\n return self.id_receiver", "def truck_id(self):\n return self._id", "def card_id(self):\n # type: () -> int\n return self._get_property('card_id')", "def account_id_secretary(self) -> int:\n return self._account_id_secretary", "def originator_id(self) -> UUID:\n return self.__dict__[\"originator_id\"]", "def receiver_id(self) -> UserId:\n return self._receiver_id", "def _get_agent_id(self):\r\n agents = Agents(self._client_object)\r\n return agents.get(self.agent_name).agent_id", "def book_id(self) -> int:\n return self._book_id", "def listing_id(self) -> int:\n return self._listing_id", "def get_originator_id(self):\n\n return self.originator_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the dealer_id of this AccountListItem.
def dealer_id(self, dealer_id): self._dealer_id = dealer_id
[ "def dealer_id(self):\n return self._dealer_id", "def payer_id(self, payer_id):\n\n self._payer_id = payer_id", "def advertisement_id(self, advertisement_id):\n\n self._advertisement_id = advertisement_id", "def set_id_receiver(self, id_receiver):\n self.id_receiver = id_receiver", "def set_originator_id(self, originator_id):\n\n self.originator_id = originator_id\n return 1", "def wearer_id(self, wearer_id):\n\n self._wearer_id = wearer_id", "def set_card_id(self, card_id):\n self.card_id = card_id", "def paypal_id(self, paypal_id):\n\n self._paypal_id = paypal_id", "def shopper_id(self, shopper_id):\n\n self._shopper_id = shopper_id", "def originator_order_id(self, originator_order_id):\n\n self._originator_order_id = originator_order_id", "def fulfillment_id(self, fulfillment_id):\n\n self._fulfillment_id = fulfillment_id", "def tradeId(self, tradeId):\n\n self._tradeId = tradeId", "def current_pitcher_id(self, current_pitcher_id):\n\n self._current_pitcher_id = current_pitcher_id", "def partner_id(self, partner_id: UserId):\n\n self._partner_id = partner_id", "def set_account_split_id(self, account_split_id):\n self.account_split_id = account_split_id", "def book_id(self, book_id: int):\n\n self._book_id = book_id", "def card_id(self, value):\n self._set_property('card_id', value)", "def managed_by_linked_account_merchant_id(self, managed_by_linked_account_merchant_id):\n\n self._managed_by_linked_account_merchant_id = managed_by_linked_account_merchant_id", "def sell_member_id(self, sell_member_id):\n\n self._sell_member_id = sell_member_id", "def id(self, vehicleId):\n self._id = vehicleId" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the dealer_name of this AccountListItem.
def dealer_name(self): return self._dealer_name
[ "def getAcctHolderName(self):\n return self._acctHolderName", "def account_name(self) -> str:\n return self['accountName']", "def dealer_id(self):\n return self._dealer_id", "def _get_name(self) -> \"std::string\" :\n return _core.ListItem__get_name(self)", "def get_name_item(self):\n return self.name_item", "def donor_name(self):\n return self.name", "def item_name(self) -> Optional[str]:\n return self.data.get(self._DATA_KEY_ITEM_NAME)", "def get_name(self):\n return self.__name_army", "def get_menu_item_name(self):\n return self.menu_item_name", "def getName(self) -> str:\n return self._firstname + ' ' + self._lastname", "def get_name(self):\r\n return self._client_name", "def name(self):\n return self.dn", "def get_company_name(self):\r\n return self.company_name", "def name(self):\n req = requests.get(\n f'https://api.vanguard.com/rs/ire/01/pe/fund/{self._fund_id}'\n '/profile.json',\n headers={'Referer': 'https://vanguard.com/'})\n req.raise_for_status() # Raise if error\n return req.json()['fundProfile']['longName']", "def get_client_name(self):\n client_name_mask = self._fuzzy_match_series(self.closeout_df['item'], 'name', errors=1)\n name = self.closeout_df[client_name_mask]['item'].values[0]\n name = name.replace('name', '').strip()\n return name", "def get_name(self, user):\n return user.display_name", "def get_name(self):\r\n return self._player_name", "def get_author_name(obj):\n return obj.author.username", "def _get_name(self) -> \"std::string\" :\n return _core.DropDownControl__get_name(self)", "def agent_name(self) -> str:\n return pulumi.get(self, \"agent_name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the dealer_name of this AccountListItem.
def dealer_name(self, dealer_name):
        self._dealer_name = dealer_name
[ "def set_name_item(self, item_name):\n self.name_item = item_name", "def set_player_name(self, player):\r\n self.__name = player", "def referrer_name(self, referrer_name):\n\n self._referrer_name = referrer_name", "def dealer_id(self):\n return self._dealer_id", "def book_name(self, book_name: str):\n\n self._book_name = book_name", "def applicant_name(self, applicant_name):\n self._applicant_name = applicant_name", "def set_user_name(self, user_name): \n self.user_name = user_name", "def payer_id(self, payer_id):\n\n self._payer_id = payer_id", "def set_name(self, new_name):\n self.name = new_name", "def autoname(self):\n\t\tif not self.email_account_name:\n\t\t\tself.email_account_name = (\n\t\t\t\tself.email_id.split(\"@\", 1)[0].replace(\"_\", \" \").replace(\".\", \" \").replace(\"-\", \" \").title()\n\t\t\t)\n\n\t\tself.name = self.email_account_name", "def recipient_name(self, recipient_name):\n self._recipient_name = recipient_name", "def change_name(self, name):\n self._player_name = name", "def Edit_Contact_Name(self, index, name):\n self.__contactList[index].Set_Name(name)", "def owner_name(self, owner_name):\n \n self._owner_name = owner_name", "def caller_name(self, caller_name):\n\n self._caller_name = caller_name", "def renter_name(self, renter_name):\n if renter_name is not None and len(renter_name) > 20:\n raise ValueError(\"Invalid value for `renter_name`, length must be less than or equal to `20`\") # noqa: E501\n\n self._renter_name = renter_name", "def campaign_name(self, campaign_name):\n \n self._campaign_name = campaign_name", "def set_dataverse_name(self, dataverse_name):\n self.dataverse = dataverse_name", "def name_other(self, name_other):\n self._name_other = name_other", "def initiator_name(self, initiator_name):\n if initiator_name is None:\n raise ValueError(\"Invalid value for `initiator_name`, must not be `None`\")\n\n self._initiator_name = initiator_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the dealer_reference of this AccountListItem.
def dealer_reference(self):
        return self._dealer_reference
[ "def dealer_id(self):\n return self._dealer_id", "def account_ref(self):\n return self._account_ref", "def debit_account_uid(self):\n if self.is_null():\n return None\n else:\n return self._debit_account_uid", "def getAcctHolderName(self):\n return self._acctHolderName", "def item_reference(self) -> str:\n return self._gtin14[self._company_prefix_length + 1:13]", "def debit_note_uid(self):\n return self._debit_note_uid", "def account_id(self):\n\n return self._account_id.value", "def referral_code(self):\n return self._referral_code", "def referred_by(self) -> str:\n return self._referred_by", "def get_dealer_value(self):\n return self._dealer.get_value()", "def deal_card(self):\r\n deal = Deck()._deal()\r\n return deal[0]", "def account(self):\n return self.__account", "def order_referrer(self):\n return self._order_referrer", "def receivables_account(self) -> Account:\n row = AccountEntry.objects.filter(source_invoice=self).order_by('id').first()\n return row.account if row else None", "def bank_account(self) -> BankAccount:\n return self._bank_account", "def get_card_id(self):\n return self.card_id", "def get_ad_id(self):\n return self.get_item(conf.AD_ID_KEY)", "def get_account(self):\n return self.fetch_data(\"account\")", "def getAccountNumber(self):\n return self._acctNo", "def get_account_id(self):\n return self.wepay_account_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the dealer_reference of this AccountListItem.
def dealer_reference(self, dealer_reference):
        self._dealer_reference = dealer_reference
[ "def dealer_id(self):\n return self._dealer_id", "def account_ref(self, account_ref):\n\n self._account_ref = account_ref", "def payer_id(self, payer_id):\n\n self._payer_id = payer_id", "def set_carrier_data_reference(self, reference):\n value, name = reference, 'set_carrier_data_reference()'\n self.carrier_data_reference = self._value_to_latin(value, name)", "def set_owner_reference(self, set_owner_reference):\n\n self._set_owner_reference = set_owner_reference", "def referred_by(self, referred_by: str):\n\n self._referred_by = referred_by", "def card_authorization_reference_number(self, card_authorization_reference_number):\n\n self._card_authorization_reference_number = card_authorization_reference_number", "def managed_by_linked_account_merchant_id(self, managed_by_linked_account_merchant_id):\n\n self._managed_by_linked_account_merchant_id = managed_by_linked_account_merchant_id", "def reference_value(self, reference_value: ReferenceValue):\n if reference_value is None:\n raise ValueError(\"Invalid value for `reference_value`, must not be `None`\") # noqa: E501\n\n self._reference_value = reference_value", "def set_reference(self, reference_tokens):\n self.lock.acquire()\n self._reference = MeteorReference(reference_tokens, self)\n self.lock.release()", "def refer_url(self, refer_url):\n\n self._refer_url = refer_url", "def setReference(self, reference: 'char const *') -> \"void\":\n return _coin.ScXMLReferenceDataObj_setReference(self, reference)", "def set_card_id(self, card_id):\n self.card_id = card_id", "def reviewed_by(self, reviewed_by):\n\n self._reviewed_by = reviewed_by", "def dealer_added_f(self, dealer_added_f):\n\n self._dealer_added_f = dealer_added_f", "def change_limiter(self, limiter):\n self.num_limit=limit.as_limiter(limiter)\n self.set_value(self._value)", "def wearer_id(self, wearer_id):\n\n self._wearer_id = wearer_id", "def destination_client(self, destination_client):\n\n self._destination_client = destination_client", "def candidate_party_affiliation(self, candidate_party_affiliation):\n\n self._candidate_party_affiliation = candidate_party_affiliation", "def on_pick_reference(self, picker, reference, parent):\n child = parent.new_child(CONTENT_TYPE_MENDELEY_LINK, reference.as_text_reference())\n child.set_attr('icon', MENDELEY_ICON_NAME)\n child.set_attr(MENDELEY_UUID_ATTR, reference.uuid)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create money from amount.
def FromAmount(amount):
    m = Money()
    m.Amount = amount
    return m
[ "def convert(self, amount):\n return self.compute(\n request.nereid_website.company.currency.id,\n amount,\n request.nereid_currency.id)", "def test_creation(self):\n result = Money(10, 'USD')\n self.assertEqual(result.amount, 10)\n\n result = Money(-10, 'USD')\n self.assertEqual(result.amount, Decimal(\"-10\"))\n\n result = Money(Decimal(\"10\"), 'USD')\n self.assertEqual(result.amount, Decimal(\"10\"))\n\n result = Money(Decimal(\"-10\"), 'USD')\n self.assertEqual(result.amount, Decimal(\"-10\"))\n\n result = Money('10.50', 'USD')\n self.assertEqual(result.amount, Decimal(\"10.50\"))\n\n result = Money('-10.50', 'USD')\n self.assertEqual(result.amount, Decimal(\"-10.50\"))\n\n result = Money(u'10.50', u'USD')\n self.assertEqual(result.amount, Decimal(\"10.50\"))\n\n result = Money(u'-10.50', u'USD')\n self.assertEqual(result.amount, Decimal(\"-10.50\"))", "def add_money(self, amount):\n try:\n self.balance += amount\n return self.balance\n except TypeError:\n print('(Error): The add_money method requires a numeric value')", "def transfer_money(self, from_, to, amount):\n self.sub(from_, amount)\n self.add(to, amount)", "def test_convert_amount():\n money = convert_amount(\"1.000,00€\")\n assert money.amount == Decimal(\"1000.00\")\n assert money.currency == EUR", "def raw_to_nano(amount):\n return to_nano(amount, exp=30)", "def deposit(self, amount):\n bal = self.get_balance()\n bal += amount\n return bal", "def bet_money(self, amount):\n self.money -= amount # money 100->80,\n self.bet += amount # bet 0-> 20", "def parse_money(value, currency):\n\n # I believe the specified regex pattern is flexible enough for our purposes.\n # This way also provides a way for the possibility to use other currencies,\n # and other decimal notation.\n if match(\".+,[0-9]+$\", value):\n return Money(\n # Money(value=...) 
doesn't take kindly to commas.\n # Thus the thousands-separators, if any, are removed.\n # The comma is replaced by a dot, which Money() can parse.\n value.replace(\".\", \"\").replace(\",\", \".\"),\n currency\n )\n return Money(value.replace(\",\", \"\"), currency)", "def eth_to_usd(self, amount: Decimal) -> Decimal:\n return self.eth_rate * amount", "def withdraw_money(self, amount):\n self.balance -= amount", "def create_money_transfer(request_info: Dict) -> None:\n\n amount = Decimal(request_info.get(\"amount\"))\n\n with transaction.atomic():\n payer_name = request_info.get(\"payer\")\n recipient_name = request_info.get(\"recipient\")\n accounts = Account.objects.select_for_update().filter(\n Q(account=payer_name) | Q(account=recipient_name)\n )\n\n # len(accounts) < 2 when payer or recipient doesn't exist\n if len(accounts) < 2:\n raise AccountDoesNotExistError\n\n # acc_ordered_dict - creating dictionary from query to get info about account\n # with the key, where key - account_name\n acc_ordered_dict = {account.account: account for account in accounts}\n payer = acc_ordered_dict.get(payer_name)\n recipient = acc_ordered_dict.get(recipient_name)\n\n if payer.balance < amount:\n raise MoneyIsNotEnoughError\n\n payer.balance -= amount\n payer.save()\n recipient.balance += amount\n recipient.save()\n\n # creating data for making historical information about transaction\n request_info = request_info | {\n \"payer_id\": payer.id,\n \"recipient_id\": recipient.id,\n \"income_outcome\": False,\n }\n create_transfer_info(request_info)", "def return_money(self, money):\n self._caccount = int(self._caccount + money)\n report = {\n 'input': money,\n 'kind': 'storno',\n 'description': 'transfer did not succeeded'}\n self.make_report(**report)", "def create_account(account_id, amount):\n create_account = Balance(\n account_id=account_id, balance=amount)\n create_account.save()", "def deposit(self, amount):\n if amount < 0:\n raise ValueError('Negative deposit')\n else:\n self.bal += amount", "def create_decimal(value, default=0):\n\n from decimal import Decimal as _Decimal\n\n if value is None:\n return _Decimal(0, get_decimal_context())\n\n try:\n d = _Decimal(\"%.6f\" % value, get_decimal_context())\n except:\n value = _Decimal(value, get_decimal_context())\n d = _Decimal(\"%.6f\" % value, get_decimal_context())\n\n if d <= -1000000000000:\n from Acquire.Accounting import AccountError\n raise AccountError(\n \"You cannot create a balance with a value less than \"\n \"-1 quadrillion! (%s)\" % (value))\n\n elif d >= 1000000000000000:\n from Acquire.Accounting import AccountError\n raise AccountError(\n \"You cannot create a balance with a value greater than \"\n \"1 quadrillion! (%s)\" % (value))\n\n return d", "def to_internal_value(self, value):\n try:\n value = Money(value)\n except IncorrectMoneyInputError:\n self.fail('invalid')\n\n return self.validate_precision(value)", "def format_usd(amount):\n\n return f\"${amount:,.0f}\"", "def int_to_amount(int_num,precision=0):\n value = uint256()\n value.fourth = int(int_num % UINT64_NUMBER)\n int_num //= UINT64_NUMBER\n value.third = int(int_num % UINT64_NUMBER)\n int_num //= UINT64_NUMBER\n value.second = int(int_num % UINT64_NUMBER)\n int_num //= UINT64_NUMBER\n value.first = int(int_num)\n return Amount(\n value = value,\n precision = precision\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }