Dataset columns:
  query      string     (lengths 9 to 9.05k)
  document   string     (lengths 10 to 222k)
  negatives  sequence   (lengths 19 to 20)
  metadata   dict
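Each row pairs a natural-language query (typically a function docstring) with the matching code document and a list of mined negative code snippets. A minimal sketch of iterating the rows, assuming they are exported as JSON Lines with the four columns above (the file name and export format are assumptions, not part of this dataset's documentation):

import json

with open("train.jsonl") as f:  # assumed export path
    for line in f:
        row = json.loads(line)
        query = row["query"]          # short natural-language description
        document = row["document"]    # positive code snippet
        negatives = row["negatives"]  # 19-20 non-matching code snippets
        objective = row["metadata"]["objective"]  # training objective specification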
Add song to the storage directory and to the database. Return ID of the new song / error message.
def add_song(self):
    path = input("Give file path:\t")  # Request file path
    path = path.replace('\\', '/')
    # Check that the path leads to a song that is not already found in Storage
    if self.path_song_re.match(path) and not self.path_storage_re.match(path):
        copy(path, self.p_storage)  # Copy the song to the storage directory
        file_title, form = path.split("/")[-1].split(".")  # Save file title and format from the path
        # Check the existence of a song with the same title and format in the database
        sql = "SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s"
        self.cursor.execute(sql, (file_title, form))
        r = self.cursor.fetchall()
        if r[0][0] != 0:
            return "A song with this file name and format already exists!"
        song_title = input("Song title:\t")
        artist = input("Artist:\t")
        data = input("Release date:\t")
        tags = input("Associated tags:\t")
        # Insert song into database
        sql = "INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, %s) "
        columns = (file_title, song_title, artist, form, data, tags)
        self.cursor.execute(sql, columns)
        self.cnx.commit()
        self.cursor.execute("SELECT MAX(ID) FROM songs")
        result = self.cursor.fetchall()
        return "New song ID: " + str(result[0][0])
    else:
        return "Give valid path"
[ "def add_song():\n song_to_playlist(PLAYLIST_FOLDER, request.form['Song'], request.form['PlayLID'])\n return jsonify(result=str(request.form['PlayLID']))", "def add_song(song):\n try:\n c = connection.cursor()\n c.execute(\"\"\"INSERT INTO DMC_SONG_LIST(id, title, game, category, url) values(?, ?, ?, ?, ?)\"\"\", (song.id, song.title, song.game, song.category, song.url))\n connection.commit()\n\n except Error as e:\n print(e)", "def add_song():\n options = queue.instantiate_options()\n raw_queue = queue.instantiate_queue()\n track_id = request.args.get('song')\n\n for song in raw_queue:\n if song['track_id'] == track_id[14:]:\n return json.dumps({'error': 'Cannot add a song already in the queue'})\n\n num_songs_added = 0\n for song in raw_queue:\n if song['added_by'] == session['id']:\n num_songs_added += 1\n\n if num_songs_added >= int(options['max_individual_songs']):\n print('user reached max songs')\n return json.dumps({'error': \"You are not allowed to add any more songs until one plays\"})\n\n song_obj = create_song(track_id, added_by=session['id'])\n queue.addSong(song_obj)\n queue_change()\n return json.dumps({'success': 'added ' + track_id})", "def test_add_song(self):\n song_id = self._add_song()\n self.assertIsNotNone(song_id)", "def insert_song(self, song_name: str, title: str, artist: str, file_hash: str, total_hashes: int) -> int:\n id = random.randint(1, 1000000000000)\n song = Songs(meta={'id': id}, song_name=song_name, song_title=title, artist=artist, file_sha1=file_hash, total_hashes=total_hashes)\n song.save()\n return id", "def add_song_to_queue(song):\n\n songID = song.id\n try:\n message = sp.add_to_queue(songID)\n except SpotifyException:\n message = \"Error - No Active Device\"\n return message", "def add_song(data):\n\n # STEP 1: Extraxt data\n token = data['token']\n new_song = data['song']\n\n if (not token) or (not new_song):\n # send_error_message()\n return\n\n # STEP 2: Get session and check auth\n user = User.objects.get(id=token)\n\n # STEP 3: Update session data\n session = Session.objects.get(id=str(user.session.id))\n songs = session.songs\n\n # check if our song in songs already\n s: Song\n already_included = False\n for s in songs:\n if str(s.song_data['uri']) == str(new_song['uri']):\n already_included = True\n break\n\n if not already_included:\n # create this song and add to session\n new_song_obj = Song()\n new_song_obj.song_data = new_song\n new_song_obj.upvote_users.append(token)\n session.songs.append(new_song_obj)\n session.save()\n\n # STEP 4: Send out update -- if there was a change\n send_session_update(session)", "def add_song(_name_of_the_song, _duration_in_number_of_seconds):\r\n # creating an instance of our Song constructor\r\n new_song = Song(name_of_the_song=_name_of_the_song,\r\n duration_in_number_of_seconds=_duration_in_number_of_seconds)\r\n db.session.add(new_song) # add new song to database session\r\n db.session.commit() # commit changes to session\r", "def save(self):\n self._check_db()\n song = self._create_song()\n song.save()", "def add_song(self, song):\n self.songs.append(song)", "def add_song_to_queue_from_id(song_id):\n\n try:\n message = sp.add_to_queue(song_id)\n except SpotifyException:\n message = \"Error - No Active Device\"\n return message", "def add_song_to_database(artist, name, db):\n if exists(db):\n f = open(db, 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, name);\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print str(current_entry) + \" already 
in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open(db, 'w')\n song_list = [Song_data(artist, name)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def add_song(self, dto):\n song = Song(\"\", \"\", \"\", \"\").from_string(dto)\n if not self._validator.validate(song):\n raise ValueError(\"Invalid song details\")\n self._repository.add(song)", "def add_song(self, song: Song):\n self.playlist.append(song)", "def add_song(self, song):\n self.songs += [song]", "def add_song(self, song, position=None):\r\n if position is None:\r\n self.track.append(song)\r\n else:\r\n self.track.insert(position, song)", "def add_song(self):\n # Error check for blank inputs\n if \"\" in (self.root.ids.input_title.text, self.root.ids.input_artist.text, self.root.ids.input_year.text):\n self.root.ids.status_text.text = \"All fields must be completed\"\n return\n # Error check for negative numbers\n try:\n if int(self.root.ids.input_year.text) < 0:\n self.root.ids.status_text.text = \"Year must be >= 0\"\n return\n # Error check for invalid numbers\n except ValueError:\n self.root.ids.status_text.text = \"Please enter a valid number\"\n return\n # Song add, clear inputs, sort songlist\n song_to_add = Song(self.root.ids.input_title.text, self.root.ids.input_artist.text,\n int(self.root.ids.input_year.text))\n self.songs.add_song(song_to_add)\n SongsToLearnApp.clear_inputs(self)\n self.sort_songs(self.root.ids.sort_options.text)", "def add_track(self, track):\n if self.song_file == None:\n return constants.NO_FILE_ERROR\n try:\n self.song_file.add_track(track)\n except ValueError as e:\n return \"Error: {}\".format(e.args[0])\n self.song_file.show()\n return constants.SUCCESS", "def import_song(self, song, playlist):\n\n try:\n song_uri = self.find_song_uri(song)\n except SongNotFoundError as e:\n print(f\"could not find song {song} to add to playlist '{playlist['name']}'\")\n else:\n self.add_song_to_playlist(song_uri, playlist[\"id\"])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove song from database and from the storage directory based on ID
def delete_song(self):
    song_id = tuple(input("Give the melody id to be deleted:\t"))
    sql = "SELECT file_title, form FROM songs WHERE id = %s"  # Check existence of song with given ID
    self.cursor.execute(sql, song_id)
    result = self.cursor.fetchall()
    if len(result) > 0:
        # Find path of song by appending the name and format to the storage directory path
        path = self.p_storage + "/" + result[0][0] + "." + result[0][1]
        os.remove(path)  # Remove song from directory
        sql = "DELETE FROM songs WHERE id = %s"  # Delete song from database
        self.cursor.execute(sql, song_id)
        self.cnx.commit()
        print(self.cursor.rowcount, "record(s) deleted")
    else:
        print("Give a valid id...")
[ "def delete_song(_id):\r\n Song.query.filter_by(id=_id).delete()\r\n # filter song by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def delete_song(database, song_name: str):\n delere = False\n deleted = None\n song_id = song_name_to_ID(song_name)\n for fingerprint in database:\n if database[fingerprint] == song_id:\n deleted = database.pop(fingerprint)\n delere = True\n return (delere, deleted)", "def delete_song(song):\n logging.debug('{CRUD_operations} BEGIN function delete_song()')\n logging.debug('{CRUD_operations} Data received: song: %s', song)\n song.is_deleted = True\n logging.debug('{CRUD_operations} END function delete_song()')", "def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))", "def song_delete(id):\n \n # get the song from the database\n song = session.query(models.Song).get(id)\n \n # check if song exists\n # if not return a 404 with a helpful message\n if not song:\n message = \"Could not find song with id {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")\n \n # successfully delete song\n message = \"Successfully deleted song with id {}\".format(id)\n data = json.dumps({\"message\": message, \"song\": song.as_dictionary()})\n \n session.delete(song)\n session.commit()\n \n return Response(data, 200, mimetype=\"application/json\")", "def disassociate_song(self, song):\n self.songs.remove(song)", "def delete_track(id):\n\twith _conn, _conn.cursor() as cur:\n\t\tcur.execute(\"DELETE FROM tracks WHERE id = %s\", (id,))", "def remove_song(self, song):\n # code omitted\n self.playlist.remove(song)", "def test_deleting_a_song(self):\r\n response = self.app.delete(\r\n \"/api/1.0/songs/{}\".format(self.valid_song_ids[2]),\r\n headers={\r\n 'User': self.user_id,\r\n 'Authorization': self.access_token\r\n }\r\n )\r\n\r\n song = Songs.query.filter_by(SongID=self.valid_song_ids[2]).first()\r\n\r\n self.assertEqual(204, response.status_code)\r\n self.assertEqual(None, song)", "def delete(self, show_id):\r\n song = Shows.query.filter_by(ShowID=show_id).first_or_404()\r\n db.session.delete(song)\r\n db.session.commit()\r\n return make_response(\"\", 204)", "def delFile(self, id):\n file = self._storage.folder / self._storage.nameTemplate.format(id = id)\n file.unlink()", "def remove_song(self):\n self.stop()\n self.listbox.delete(\"anchor\")\n pygame.mixer.music.stop()", "def delete_photo_by_id(conn, id):\n cur = conn.cursor()\n cur.execute(\"DELETE FROM Photos WHERE ID=?\", (id,))\n conn.commit()", "def delete_song(self, song_id):\n fav = Favorites.get(Favorites.id == song_id)\n try:\n fav.delete_instance()\n except Exception as ex:\n logger.error('Exception when deleting favorite song: ' + str(ex))\n return song_id, 400\n return song_id, 200", "def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)", "async def clear_data(self, msg):\n name = self.player[msg.guild.id]['name']\n os.remove(name)\n self.player['audio_files'].remove(name)", "def remove(self, path):", "def remove_Pet(self, id, directory):\n for 
i, pet in enumerate(directory):\n if pet.id == id:\n del directory[i]\n break", "def deleteFile(pk):\n ap = Apostila.objects.get(pk=pk)\n try:\n os.remove(os.path.join(MEDIA_ROOT, str(ap.file)))\n except:\n pass # silence error when no file is there" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Modifies song info in the database
def modify_data(self):
    song_id = tuple(input("Give the id of the song to be modified:\t"))  # Request song ID
    sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s"  # Find song with given ID
    self.cursor.execute(sql, song_id)
    res = self.cursor.fetchall()
    if len(res) > 0:
        while True:
            sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s"  # Save current info
            self.cursor.execute(sql, song_id)
            result = self.cursor.fetchall()
            # Request data to be modified
            modify = input("What do you want to modify? [title/artist/(release )date/tags/none]\t")
            if modify == 'title':  # Modify title
                print('Current title is ' + result[0][0])
                new = (input('Give new title:\t'), song_id[0])
                sql = "UPDATE songs SET song_title = %s WHERE id = %s"
                self.cursor.execute(sql, new)
                self.cnx.commit()
                print("New title assigned")
            if modify == 'artist':  # Modify artist
                print('Current artist is ' + result[0][1])
                new = (input('Give new artist:\t'), song_id[0])
                sql = "UPDATE songs SET artist = %s WHERE id = %s"
                self.cursor.execute(sql, new)
                self.cnx.commit()
                print("New artist assigned")
            if modify == 'date':  # Modify release date
                print('Current date is ' + result[0][2])
                new = (input('Give new date:\t'), song_id[0])
                sql = "UPDATE songs SET data = %s WHERE id = %s"
                self.cursor.execute(sql, new)
                self.cnx.commit()
                print("New date assigned")
            if modify == 'tags':  # Modify tags
                print('Current tags are ' + result[0][3])
                new = (input('Give new tags:\t'), song_id[0])
                sql = "UPDATE songs SET tag = %s WHERE id = %s"
                self.cursor.execute(sql, new)
                self.cnx.commit()
                print("New tags assigned")
            if modify == 'none':  # Do not modify anything, print the current song info
                sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s"
                self.cursor.execute(sql, song_id)
                result = self.cursor.fetchall()
                print("Current data for the song with id" + song_id[0] + "are:\ntitle:" + result[0][0] +
                      "\nartist:" + result[0][1] + "\nrelease date:" + result[0][2] + "\ntags:" + result[0][3])
                break
    else:
        print("Give a valid id...")
[ "def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)", "def update_song(self, song):\n return self.create_song(song)", "def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"", "def update_song(_id, _name_of_the_song, _duration_in_number_of_seconds):\r\n song_to_update = Song.query.filter_by(id=_id).first()\r\n song_to_update.name_of_the_song = _name_of_the_song\r\n song_to_update.duration_in_number_of_seconds = _duration_in_number_of_seconds\r\n db.session.commit()", "def save(self):\n self._check_db()\n song = self._create_song()\n song.save()", "def update_title_song(title_song, mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"UPDATE song_data SET title_song = %s \"\n \"WHERE user_id = %s;\", (title_song, mess_chat_id)\n )\n\n connection.commit()", "def update(self, **kwargs):\n if (self.metadata == u'') and (kwargs.get(\"metadata\", u\"\") == u\"\"):\n return\n for key, value in kwargs.iteritems():\n if (key in [\"lp\", \"id\", \"length\", \"filename\", \"metadata\"]):\n if (key == \"metadata\"):\n value = self.fix_encoding(value)\n setattr(self, \"_\" + key, value)\n with MySQLCursor() as cur:\n if (key == \"lp\"):\n # change database entries for LP data\n ldiff = kwargs.get('ldiff', None)\n cur.execute(\"INSERT INTO eplay (`isong`, `dt`, `ldiff`) \\\n VALUES(%s, FROM_UNIXTIME(%s), %s);\",\n (self.songid, int(value), ldiff))\n if (self.afk):\n cur.execute(\"UPDATE `tracks` SET \\\n `lastplayed`=FROM_UNIXTIME(%s) \\\n WHERE `id`=%s LIMIT 1;\", (self._lp, self.id))\n elif (key == \"length\"):\n # change database entries for length data\n cur.execute(\"UPDATE `esong` SET `len`=%s WHERE \\\n id=%s\", (self.length, self.songid))\n elif (key == \"id\"):\n self._filename, temp = self.get_file(value)\n # Update the search index if we need to.\n if key == \"lp\" and self.afk:\n self.update_index()", "def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def on_playing(self):\n\n status = self.player.get_status()\n\n self.player.book.elapsed = float(status['elapsed'])\n self.player.book.part = int(status['song']) + 1\n\n #print \"%s second of part %s\" % (self.player.book.elapsed, self.player.book.part)\n\n self.db_cursor.execute(\n 'INSERT OR REPLACE INTO progress (book_id, part, elapsed) VALUES (%s, %d, %f)' %\\\n (self.player.book.book_id, self.player.book.part, self.player.book.elapsed))\n\n self.db_conn.commit()", "def set_track_data(song, spotipy_instance):\n result = spotipy_instance.track(song['track_id'])\n song['album_id'] = result['album']['id']\n song['track_popularity'] = result['popularity']\n song['album_name'] = result['album']['name']\n song['artist'] = result['artists'][0]['name']\n 
song['artist_id'] = result['artists'][0]['id']\n song['title'] = result['name']", "def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))", "def _update_songs(self):\n\n songs = self._api.get_all_songs()\n self._library = {}\n self._songs = {}\n\n for song in songs:\n self._songs[song['id']] = song \n if song['artist'] == \"\":\n song['artist'] = \"unknown\"\n if song['album'] == \"\":\n song['album'] = \"unknown\"\n \n if song['artist'] not in self._library.keys():\n self._library[song['artist']] = {}\n if song['album'] not in self._library[song['artist']].keys():\n self._library[song['artist']][song['album']] = {}\n self._library[song['artist']][song['album']]['tracks'] = []\n self._library[song['artist']][song['album']]['tracks'].append(song)\n\n for artist in self._library.keys():\n for album in self._library[artist].keys():\n if len(self._library[artist][album]['tracks']) > 1:\n self._library[artist][album]['tracks'] = sorted(self._library[artist][album]['tracks'], key=lambda k: k.get('trackNumber',''))", "def task_9_update_song_artist():\n # step 1: create the artist\n artist = '...'\n\n # step 2: get the song called 'Superstition'\n song = '...'\n\n # step 3: assign created artist to the song and save() the song model\n pass", "def enterSong(song):\n\tc, conn = connect()\n\tsql = []\n\n\t# checks if the song is already in the database by hash\n\tif checkHash(song):\n\t\tsql2 = appendSong(song)\n\t\tsql += sql2\n\t\t\n\t\t# checks if the song has an artist\n\t\tif song.artist:\n\t\t\tsql2 = appendArtist(song)\n\t\t\tsql += sql2\n\t\n\t\t# checks if the song has an album\n\t\tif song.album:\n\t\t\tsql2 = appendAlbum(song)\n\t\t\tsql += sql2\n\t\n\t# execute all the queries\n\tfor query in sql:\n\t\tc.execute(query)\n\t\t\n\tconn.commit()\n\treturn sql", "def songUpdate(song,cindex):\r\n if cindex == 0:\r\n song[MpMusic.SONGINDEX] = songGetAlbumIndex(song);\r\n return 0;", "def upd_audio_nm_db(form,user,mf):\n\tcpright = form.cleaned_data['owner']\n\tif not cpright:\n\t\tcpright= user.login\n\t\n\tdesc = form.cleaned_data['desc']\n\ttypee = form.cleaned_data['typee']\n\tpermanent = form.cleaned_data['permanent']\n\tIsV = \"TRUE\"\n\tdate_del = datetime.date(9999,12,12)\n\tif not permanent:\n\t\tIsV = \"FALSE\"\n\t\tdate_del = form.cleaned_data['date_del']\n\n\tcursor = connection.cursor()\n\t\n\tcursor.execute(\"\"\"UPDATE other_sound SET description_mma = '%s', date_delete_possible_mma = '%s', permanent_mma = '%s', copyright_mma = '%s', modify_date='%s' WHERE id_mma=%s \"\"\"%(desc,date_del,IsV,cpright,datetime.date.today(),mf.id_mma))\n\t\n\tif typee:\n\t\tcursor.execute(\"\"\"UPDATE other_sound set id_type_other_sound = '%s' WHERE id_mma=%s \"\"\"%(typee.id_type_other_sound,mf.id_mma))\n\t\t\n\tcursor.close()\n\t\n\ttransaction.commit_unless_managed()", "def __insert_song_data(cur, df):\n song_data = (\n df.song_id.values[0],\n df.title.values[0],\n df.artist_id.values[0],\n (df.year.values[0]).item(),\n (df.duration.values[0]).item()\n )\n cur.execute(song_table_insert, song_data)", "def add_song(song):\n try:\n c = connection.cursor()\n c.execute(\"\"\"INSERT INTO DMC_SONG_LIST(id, title, game, category, url) values(?, ?, ?, ?, ?)\"\"\", (song.id, song.title, song.game, song.category, song.url))\n connection.commit()\n\n 
except Error as e:\n print(e)", "def add_song_to_database(artist, name, db):\n if exists(db):\n f = open(db, 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, name);\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print str(current_entry) + \" already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open(db, 'w')\n song_list = [Song_data(artist, name)]\n f.seek(0,0)\n pickle.dump(song_list, f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a Batch from an existing batch id. Notes
def from_batch_id(batch_id: int, *args, **kwargs):
    b = Batch(*args, **kwargs)
    assert isinstance(b._backend, _backend.ServiceBackend)
    b._batch_handle = b._backend._batch_client.get_batch(batch_id)
    return b
[ "def create_batch_prediction(BatchPredictionId=None, BatchPredictionName=None, MLModelId=None, BatchPredictionDataSourceId=None, OutputUri=None):\n pass", "def create_batch(self, batch_index, *args, **kwargs):\n batch = self.dataset.create_batch(batch_index, *args, **kwargs)\n batch_res = self._exec(batch)\n return batch_res", "def batch_id(self, batch_id):\n\n self._batch_id = batch_id", "def new_batch(self) -> None:\r\n \r\n #get the input values\r\n self.batch_number_temp = self.batch_number.get()\r\n self.quantity_litres_temp = self.quantity_litres.get()\r\n self.beer_type_is_temp = self.beer_type_is.get()\r\n \r\n global running_batches\r\n \r\n #validate user input\r\n try:\r\n int(self.batch_number_temp)\r\n int(self.quantity_litres_temp)\r\n except ValueError:\r\n self.changeme.config(text=\"ERROR: Input not valid\")\r\n else:\r\n \r\n #look for batch with same name\r\n i = 0\r\n batchFound = False\r\n while i < len(running_batches):\r\n if running_batches[i][0] == int(self.batch_number_temp):\r\n batchFound = True\r\n break\r\n i = i + 1\r\n \r\n if batchFound == False:\r\n if int(self.quantity_litres_temp) <= 1000 and int(self.quantity_litres_temp) > 0:\r\n start_time = time.time()\r\n running_batches.append([int(self.batch_number_temp), start_time, \"Hot Brew\", int(self.quantity_litres_temp), True, \"No tank\", self.beer_type_is_temp])\r\n self.changeme.config(text=\"SUCCESS: Batch created\")\r\n \r\n else:\r\n self.changeme.config(text=\"ERROR: Batch volume is not adequate.\")\r\n else:\r\n self.changeme.config(text=\"ERROR: Batch already exists\")", "def get(cls, id):\n\n batch = cls.query.get(id)\n if batch is None:\n raise InvalidBatchId\n return batch", "def add_to_batch(self, created_job_ids):\n batch_folder = BatchFolder(path=self.current_dir)\n if batch_folder.has_batch():\n batch: JobBatch = batch_folder.load()\n else:\n batch = JobBatch(job_ids=[], server=self.get_active_server())\n if batch.server.url != self.get_active_server().url:\n logger.info(\n \"A batch exists in this folder, but for a different server. 
\"\n \"Not saving job ids in batch\"\n )\n else:\n logger.info(\"Saving job ids in batch in current folder\")\n batch.job_ids = sorted(\n list(set(batch.job_ids) | set(created_job_ids))\n ) # add only unique new ids\n batch_folder.save(batch)", "def test_get_batch_by_id(self):\n pass", "def batch_id(self):\n return self._batch_id", "def sfdcCreateBatch(query, chunk_size=10000, **kwargs):\n api_ver = kwargs.get('api_ver', '')\n session_id = kwargs.get('session_id', '')\n instance = kwargs.get('instance', '')\n job_id = kwargs.get('job_id', '')\n sfdcXml = kwargs.get('sfdcXml', {})\n\n bodyXml = sfdcXml.get('batch', {}).get('body')\n url = sfdcXml.get('batch', {}).get('url')\n headers = sfdcXml.get('batch', {}).get('headers')\n\n bodyXml = unicode(query, \"UTF-8\")\n url = url.format(instance=instance, api_ver=api_ver,\\\n job_id=job_id)\n headers['Content-Type'] = headers.get('Content-Type', '')\\\n .format(chunk_size=chunk_size)\n headers['X-SFDC-Session'] = session_id\n\n resp = requests.post(url=url, headers=headers, data=bodyXml)\n dictResp = xmltodict.parse(resp.text)\n batch_id = str(dictResp['batchInfo']['id'])\n\n return batch_id", "def test_batch_get_with_block_id(self):\n response = self.make_request(batch_id='b' * 127 + '1')\n\n self.assertEqual(self.status.NO_RESOURCE, response.status)\n self.assertFalse(response.batch.SerializeToString())", "def MakeBatch(self, buffer):\r\n raise NotImplementedError('Must implement MakeBatch in a subclass.')", "def nextBatch(self, batch_to_get_id=None):\n\n # batch id to get\n if batch_to_get_id is None:\n batch_to_get_id = self.current_batch_id\n\n # batch to get\n batch_to_get = self.batches[batch_to_get_id]\n\n # check if batch is available in memory / disk\n if batch_to_get.is_stored:\n # get batch data\n X_data, y_data = batch_to_get.getBatchData()\n # return X np array, label array\n return X_data, y_data\n\n # get data of current batch\n urls = list()\n\n for key in batch_to_get.ids:\n value = self.data_dict.data_dict[key]\n batch_to_get.batch_subjects[key] = value\n batch_to_get.y_data.append(value['label'])\n urls.append(value['path'])\n\n # get images using Image Loader class\n binary_images = self.imageLoader.getImages(urls)\n\n # convert images to array\n X_data = self._listOfImagesToNumpy(images=binary_images)\n y_data = np.array(batch_to_get.y_data)\n\n # decide where to store batch\n system_memory_usage_percent = psutil.virtual_memory()[2]\n if (system_memory_usage_percent < 90):\n save_to = \"memory\"\n elif self.disk_scratch is not None:\n save_to = \"disk\"\n elif self.disk_scratch is not None:\n save_to = \"disk_raw\"\n else:\n save_to = \"none\"\n\n # store batch\n batch_to_get.storeBatch(storage=save_to, X_data=X_data,\n y_data=y_data)\n\n # increment current batch\n if self.current_batch_id < (self.n_batches-1):\n self.current_batch_id += 1\n else:\n self.current_batch_id = 0\n\n # return X np array, label array\n return X_data, y_data", "def post(self, batch):\n num_jobs = len(batch)\n plural = \"\" if num_jobs == 1 else \"s\"\n log.info(\"> Sending batch request with %s job%s\", num_jobs, plural)\n data = []\n for i, job in enumerate(batch):\n if job.finished:\n raise Finished(job)\n else:\n job.finished = True\n log.info(\"> {%s} %s\", i, job)\n data.append(dict(job, id=i))\n response = self.resource.post(data)\n log.info(\"< Received batch response for %s job%s\", num_jobs, plural)\n return response", "def add_plant_batch(db_path: str, plant_batch: PlantBatch) -> None:\n plant, location, tray = 
parse_plant_location_tray_to_dict(plant_batch)\n\n query = f'INSERT INTO batches (Plant, Location, Tray, n_trays, planting_time) VALUES (\"{plant}\", \"{location}\", \"{tray}\", {plant_batch.n_tray}, \"{plant_batch.planting_time.isoformat()}\")'\n\n conn: Connection = sqlite3.connect(path.join(db_path, 'batches.db'))\n curr: Cursor = conn.cursor()\n try:\n curr.execute(query)\n except sqlite3.IntegrityError:\n raise ValueError(\"Error occured\")\n\n conn.commit()\n curr.close()\n conn.close()", "def create_batch(self):\n self.finished_ids = self.get_finished_ids()\n unfinished_ids = self.id_set.difference(self.finished_ids)\n # ignore skipped file ids\n unfinished_ids = list(unfinished_ids.difference(self.skipped_file_ids))\n self.batch_ids = unfinished_ids[:self.config.tagger_one_batch_size]\n batch = [self.mapping_id_file[doc_id] for doc_id in self.batch_ids]\n self.logger.debug(f\"Variable processed_ids contains {len(self.finished_ids)} elements\")\n self.logger.debug(f\"Variable unfinished_ids contains {len(unfinished_ids)} elements\")\n batch_id = None\n batch_file = None\n if batch:\n batch_id = uuid.uuid1()\n batch_file = self.get_batch_file(batch_id)\n # Write batch\n with open(batch_file, \"wt\") as f_batch:\n for fn in batch:\n with open(fn) as f_doc:\n f_batch.write(f_doc.read())\n\n self.logger.debug(\"Created batch ({}, {} files)\".format(batch_id, len(batch)))\n return batch_id, batch_file", "def get_batch(self, name):\n batches = self._meta['sets'].get('batches', {})\n if batches.get(name):\n b = name\n elif batches.get(name):\n b = name\n else:\n raise KeyError('No Batch found named {}.'.format(name))\n return qp.Batch(self, b)", "def sample_batch(db, create_product, sample_product):\n prod = create_product(sample_prod)\n ret_batch = sample_bat.copy()\n ret_batch['product'] = prod.id\n return ret_batch", "def add(\n self,\n batch: RolloutBatchProtocol,\n buffer_ids: Optional[Union[np.ndarray, list[int]]] = None,\n ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n # preprocess batch\n new_batch = Batch()\n for key in set(self._reserved_keys).intersection(batch.keys()):\n new_batch.__dict__[key] = batch[key]\n batch = new_batch\n batch.__dict__[\"done\"] = np.logical_or(batch.terminated, batch.truncated)\n assert {\"obs\", \"act\", \"rew\", \"terminated\", \"truncated\", \"done\"}.issubset(batch.keys())\n if self._save_only_last_obs:\n batch.obs = batch.obs[:, -1]\n if not self._save_obs_next:\n batch.pop(\"obs_next\", None)\n elif self._save_only_last_obs:\n batch.obs_next = batch.obs_next[:, -1]\n # get index\n if buffer_ids is None:\n buffer_ids = np.arange(self.buffer_num)\n ptrs, ep_lens, ep_rews, ep_idxs = [], [], [], []\n for batch_idx, buffer_id in enumerate(buffer_ids):\n ptr, ep_rew, ep_len, ep_idx = self.buffers[buffer_id]._add_index(\n batch.rew[batch_idx],\n batch.done[batch_idx],\n )\n ptrs.append(ptr + self._offset[buffer_id])\n ep_lens.append(ep_len)\n ep_rews.append(ep_rew)\n ep_idxs.append(ep_idx + self._offset[buffer_id])\n self.last_index[buffer_id] = ptr + self._offset[buffer_id]\n self._lengths[buffer_id] = len(self.buffers[buffer_id])\n ptrs = np.array(ptrs)\n try:\n self._meta[ptrs] = batch\n except ValueError:\n batch.rew = batch.rew.astype(float)\n batch.done = batch.done.astype(bool)\n batch.terminated = batch.terminated.astype(bool)\n batch.truncated = batch.truncated.astype(bool)\n if self._meta.is_empty():\n self._meta = create_value(batch, self.maxsize, stack=False) # type: ignore\n else: # dynamic key pops up in batch\n 
alloc_by_keys_diff(self._meta, batch, self.maxsize, False)\n self._set_batch_for_children()\n self._meta[ptrs] = batch\n return ptrs, np.array(ep_rews), np.array(ep_lens), np.array(ep_idxs)", "def helper_create_batch_item(\n *,\n amount,\n bank_code,\n bank_account_name,\n bank_account_number,\n description,\n external_id,\n email_to=None,\n email_cc=None,\n email_bcc=None,\n **kwargs,\n ):\n params = locals()\n del params[\"kwargs\"]\n\n return BatchDisbursementItem.Query(**params)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new input resource file object representing a single file.
def read_input(self, path: str) -> _resource.InputResourceFile:
    irf = self._new_input_resource_file(path)
    return irf
[ "def addinputfile(self, project, inputtemplate, sourcefile, **kwargs):\n if isinstance( inputtemplate, str) or (sys.version < '3' and isinstance( inputtemplate, unicode)): #pylint: disable=undefined-variable\n data = self.get(project) #causes an extra query to server\n inputtemplate = data.inputtemplate(inputtemplate)\n elif not isinstance(inputtemplate, clam.common.data.InputTemplate):\n raise Exception(\"inputtemplate must be instance of InputTemplate. Get from CLAMData.inputtemplate(id)\")\n\n if not isinstance(sourcefile, IOBase):\n sourcefile = open(sourcefile,'rb')\n if 'filename' in kwargs:\n filename = self.getinputfilename(inputtemplate, kwargs['filename'])\n else:\n filename = self.getinputfilename(inputtemplate, os.path.basename(sourcefile.name) )\n\n data = {\"file\": (filename,sourcefile,inputtemplate.formatclass.mimetype), 'inputtemplate': inputtemplate.id}\n for key, value in kwargs.items():\n if key == 'filename':\n pass #nothing to do\n elif key == 'metadata':\n assert isinstance(value, clam.common.data.CLAMMetaData)\n data['metadata'] = value.xml()\n elif key == 'metafile':\n data['metafile'] = open(value,'rb')\n else:\n data[key] = value\n\n\n requestparams = self.initrequest(data)\n if 'auth'in requestparams:\n #TODO: streaming support doesn't work with authentication unfortunately, disabling streaming for now:\n del data['file']\n requestparams['data'] = data\n requestparams['files'] = [('file', (filename,sourcefile, inputtemplate.formatclass.mimetype))]\n if 'metafile' in kwargs:\n del data['metafile']\n requestparams['files'].append(('metafile',('.'+ filename + '.METADATA', open(kwargs['metafile'],'rb'), 'text/xml')))\n else:\n #streaming support\n encodeddata = MultipartEncoder(fields=requestparams['data']) #from requests-toolbelt, necessary for streaming support\n requestparams['data'] = encodeddata\n requestparams['headers']['Content-Type'] = encodeddata.content_type\n r = requests.post(self.url + project + '/input/' + filename,**requestparams)\n sourcefile.close()\n\n if r.status_code == 400:\n raise clam.common.data.BadRequest()\n elif r.status_code == 401:\n raise clam.common.data.AuthRequired()\n elif r.status_code == 403:\n if r.text[0] == '<':\n #XML response\n return self._parseupload(r.text)\n else:\n raise clam.common.data.PermissionDenied(r.text)\n elif r.status_code == 404:\n raise clam.common.data.NotFound(r.text)\n elif r.status_code == 500:\n raise clam.common.data.ServerError(r.text)\n elif r.status_code == 405:\n raise clam.common.data.ServerError(\"Server returned 405: Method not allowed for POST on \" + self.url + project + '/input/' + filename)\n elif r.status_code == 408:\n raise clam.common.data.TimeOut()\n elif not (r.status_code >= 200 and r.status_code <= 299):\n raise Exception(\"An error occured, return code \" + str(r.status_code))\n\n return self._parseupload(r.text)", "def NewFileObject(self, resolver_context, path_spec):\n raise errors.NotSupported(\n 'Missing implementation to create file input/output (IO) object.')", "def from_file(cls, fileobj):\n raise NotImplementedError('from_file not implemented')", "def _make_file_object(data):\n return BinaryFileCR(io.BytesIO(data))", "def LoadResourceFile(input_fname):\n try:\n input_text = files.ReadFileContents(input_fname)\n except files.Error as e:\n raise ResourceFileReadError(six.text_type(e))\n\n file_type = GetResourceFileType(input_fname)\n if file_type == ResourceFileType.JSON:\n try:\n return json.loads(input_text)\n except ValueError as e:\n raise ResourceFileParseError('Error 
in resource file JSON: ' +\n six.text_type(e))\n elif file_type == ResourceFileType.YAML:\n try:\n return yaml.load(input_text)\n except yaml.YAMLParseError as e:\n raise ResourceFileParseError('Error in resource file YAML: ' +\n six.text_type(e))\n else: # file_type == ResourceFileType.UNKNOWN\n raise ResourceFileTypeError(\n 'Input file [{}] not of type YAML or JSON'.format(input_fname))", "def from_file(filename):\n r = Recipe(filename)\n return r", "def __handleInputFilename(self, filename):\n inputFile = InputFile(filename, self.__inputLanguage)\n self.__inputFiles.append(inputFile)\n return inputFile", "def file(resource_name, local_filepath):\n\n return {\n \"name\": resource_name,\n \"type\": PLI_FILE_TYPE,\n \"local_filepath\": local_filepath\n }", "def _get_file_object(infilename):\n\n _, extension = os.path.splitext(infilename)\n if extension.lower() == '.spe':\n return parsers.SpeFile(infilename)\n elif extension.lower() == '.spc':\n return parsers.SpcFile(infilename)\n elif extension.lower() == '.cnf':\n return parsers.CnfFile(infilename)\n else:\n raise NotImplementedError(\n 'File type {} can not be read'.format(extension))", "def makeFileResource(path):\n\n path = unwrapStr(path)\n segments = [segment.encode(\"utf-8\") for segment in path.split(u'/')]\n if not path.startswith(u'/'):\n # Relative path.\n segments = os.getcwd().split('/') + segments\n log.log([\"fs\"], u\"makeFileResource.run/1: Relative path '%s'\" % path)\n return FileResource(segments)", "def __init__(self, source=None):\n if source is not None:\n if isinstance(source, (str, unicode)):\n if os.path.isfile(source):\n self.from_file(source)\n elif hasattr(self, \"_FILE_NAME_\"):\n self.from_file(os.path.join(source, self._FILE_NAME_))\n else:\n raise IOError(\"Could not open result {}\".format(source))\n elif isinstance(source, dict):\n self.from_dict(source)\n else:\n self.from_file(source)", "def open(cls, filename: typing.Union[str, bytes, os.PathLike], *, rsrcfork: typing.Optional[bool]=None, **kwargs) -> \"ResourceFile\":\n\t\t\n\t\tf: typing.io.BinaryIO\n\t\tif rsrcfork is None:\n\t\t\t# Determine whether the file has a usable resource fork.\n\t\t\ttry:\n\t\t\t\t# Try to open the resource fork.\n\t\t\t\tf = open(os.path.join(filename, \"..namedfork\", \"rsrc\"), \"rb\")\n\t\t\texcept (FileNotFoundError, NotADirectoryError):\n\t\t\t\t# If the resource fork doesn't exist, fall back to the data fork.\n\t\t\t\tf = open(filename, \"rb\")\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\t# Resource fork exists, check if it actually contains anything.\n\t\t\t\t\tif f.read(1):\n\t\t\t\t\t\t# Resource fork contains data, seek back to start before using it.\n\t\t\t\t\t\tf.seek(0)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Resource fork contains no data, fall back to the data fork.\n\t\t\t\t\t\tf.close()\n\t\t\t\t\t\tf = open(filename, \"rb\")\n\t\t\t\texcept BaseException:\n\t\t\t\t\tf.close()\n\t\t\t\t\traise\n\t\telif rsrcfork:\n\t\t\t# Force use of the resource fork.\n\t\t\tf = open(os.path.join(filename, \"..namedfork\", \"rsrc\"), \"rb\")\n\t\telse:\n\t\t\t# Force use of the data fork.\n\t\t\tf = open(filename, \"rb\")\n\t\t\n\t\t# Use the selected fork to build a ResourceFile.\n\t\treturn cls(f, close=True, **kwargs)", "def get_or_create(cls, fileobj, session):\n sha256 = sha256sum(fileobj)\n # split files between subdirs\n path = build_sha256_path(sha256)\n try:\n # The file exists\n log.debug(\"try opening file with sha256: %s\", sha256)\n file = File.load_from_sha256(sha256, session)\n if file.path is None:\n log.debug(\"file 
sample missing writing it\")\n save_to_file(fileobj, path)\n file.path = path\n except IrmaDatabaseResultNotFound:\n # It doesn't\n file = cls.create(fileobj, sha256, path, session)\n return file", "def fobj(self):\n return io.open(self.name, self.mode)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Resourcefile':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ResourcefileArgs.__new__(ResourcefileArgs)\n\n __props__.__dict__[\"content_type\"] = None\n __props__.__dict__[\"data\"] = None\n __props__.__dict__[\"environment_id\"] = None\n __props__.__dict__[\"extensions\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"organization_id\"] = None\n __props__.__dict__[\"type\"] = None\n return Resourcefile(resource_name, opts=opts, __props__=__props__)", "def create_file(self, relpath, contents=\"\", encoding=\"\"):\n return Filepath.create_instance(relpath, contents, self, encoding=encoding)", "def inputfile(self, path):\n if isinstance(path, Path):\n path = str(path)\n if self.default_remote_provider is not None:\n path = self.modifier.modify_path(path)\n return IOFile(path)", "def __init__(self, owner, resourceFile):\n log.debug(u\"init resourceFile=%s\" % resourceFile)\n self._storageName = self._fn2ascii(resourceFile)\n self._userName = resourceFile.encode('utf-8')\n self._originalFile = resourceFile\n try:\n self.checksum = resourceFile.md5\n from exe.engine.idevice import Idevice\n if isinstance(owner, Idevice):\n self._idevice = owner\n if owner.parentNode:\n self.package = owner.parentNode.package\n else:\n self.package = None\n else:\n self._idevice = None\n self.package = owner\n finally:\n del self._originalFile", "def open_resource(self, resource):\n return open(os.path.join(self.root_path, resource), 'rb')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new resource group representing a mapping of identifier to input resource files.
def read_input_group(self, **kwargs: str) -> _resource.ResourceGroup:
    root = secret_alnum_string(5)
    new_resources = {name: self._new_input_resource_file(file, root)
                     for name, file in kwargs.items()}
    rg = _resource.ResourceGroup(None, root, **new_resources)
    self._resource_map.update({rg._uid: rg})
    return rg
[ "def declare_resource_group(self, **mappings: Dict[str, Any]) -> 'BashJob':\n\n for name, d in mappings.items():\n assert name not in self._resources\n if not isinstance(d, dict):\n raise BatchException(f\"value for name '{name}' is not a dict. Found '{type(d)}' instead.\")\n rg = self._batch._new_resource_group(self, d, root=name)\n self._resources[name] = rg\n _add_resource_to_set(self._valid, rg)\n return self", "def test_create_resource_group(self):\n pass", "def create_fusion_monitoring_resources():\n templating = Templating(base_path=FUSION_TEMPLATE_DIR)\n ns_name = config.ENV_DATA[\"service_namespace\"]\n logger.info(f\"Creating {ns_name} namespace\")\n exec_cmd([\"oc\", \"new-project\", ns_name])\n exec_cmd(f\"oc label namespace {ns_name} misf.ibm.com/managed=true\")\n logger.info(\"Creating an OperatorGroup\")\n og_path = os.path.join(FUSION_TEMPLATE_DIR, \"operatorgroup.yaml\")\n og_data = load_yaml(og_path)\n helpers.create_resource(**og_data)\n logger.info(\"Creating a CatalogSource\")\n catsource_data = dict()\n catsource_data[\"image\"] = config.ENV_DATA[\"fusion_catalogsource\"]\n template = templating.render_template(\n \"catalogsource.yaml.j2\",\n catsource_data,\n )\n template = yaml.load(template, Loader=yaml.Loader)\n helpers.create_resource(**template)\n logger.info(\"Creating a Subscription\")\n og_path = os.path.join(FUSION_TEMPLATE_DIR, \"subscription.yaml\")\n og_data = load_yaml(og_path)\n helpers.create_resource(**og_data)\n logger.info(\"Waiting for catalogsource\")\n catalog_source = CatalogSource(\n resource_name=\"managed-fusion-catsrc\",\n namespace=ns_name,\n )\n catalog_source.wait_for_state(\"READY\")\n logger.info(\"Creating a monitoring secret\")\n secret_data = dict()\n secret_data[\"pagerduty_config\"] = config.ENV_DATA[\"pagerduty_config\"]\n secret_data[\"smtp_config\"] = config.ENV_DATA[\"smtp_config\"]\n template = templating.render_template(\n \"monitoringsecret.yaml.j2\",\n secret_data,\n )\n template = yaml.load(template, Loader=yaml.Loader)\n helpers.create_resource(**template)", "def create_group(self, groupdata: Dict[str, Any]) -> Group:\n ...", "def create_new_Group(config, oldgroup):\n path = config['file_inputs']['oligos']\n # path = config\n with open(path) as o, open(oldgroup) as g:\n # parse the oligo file and use the barcode_label (4th column) as\n # search pattern, to remove the temp files from m.chimera.vsearch\n oldgroup_file = g.read()\n for barcode_label in (line.split()[3] for line in o.readlines() if 'barcode' in line):\n oldgroup_file = re.sub(rf'{barcode_label}.*', barcode_label, oldgroup_file)\n\n with open(oldgroup + '_new', 'w') as f:\n f.write(oldgroup_file)\n\n return (oldgroup + '_new')", "def RePack(output_file, input_files):\n resources = {}\n for filename in input_files:\n new_resources = data_pack.ReadDataPack(filename)\n\n # Make sure we have no dups.\n duplicate_keys = set(new_resources.keys()) & set(resources.keys())\n if len(duplicate_keys) != 0:\n raise exceptions.KeyError(\"Duplicate keys: \" + str(list(duplicate_keys)))\n\n resources.update(new_resources)\n\n data_pack.WriteDataPack(resources, output_file)", "def createGroup(root, group, fileList):\n topGroupElem = ElementTree.SubElement(root, ELEM_GROUP, {ATTR_NAME: group})\n headerGroupElem = None\n sourceGroupElem = None\n pathElem = None\n for fl in fileList:\n if fl.endswith(\".h\"):\n if headerGroupElem == None:\n headerGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_HEADER})\n pathElem = 
ElementTree.SubElement(headerGroupElem, ELEM_PATH)\n else:\n if sourceGroupElem == None:\n sourceGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_SRC})\n pathElem = ElementTree.SubElement(sourceGroupElem, ELEM_PATH)\n pathElem.text = fl", "def create_raster_resources(self, file_path):\n extension = os.path.splitext(os.path.normpath(file_path))[1]\n fomart_x = extension[1:]\n file_name = os.path.basename(file_path)\n base = os.path.splitext(file_name)[0]\n resource_pk = []\n if os.path.isfile(file_path) and fomart_x in self.pk_formats:\n sub_dataset_name = file_path\n src_ds = self.get_source(sub_dataset_name)\n if not self.name:\n self.name = os.path.basename(src_ds.GetDescription())\n self.set_global(src_ds)\n\n resource_pk = []\n for band_num in range(1, src_ds.RasterCount + 1):\n bands = OrderedDict()\n srcband = src_ds.GetRasterBand(band_num)\n bands[\"extensions\"] = [fomart_x]\n bands[\"other_paths\"] = \"\"\n bands[\"format\"] = \"raster\"\n bands[\"name\"] = clean_table_name(base)\n bands[\"path\"] = os.path.basename(src_ds.GetDescription())\n bands[\"band_name\"] = base + \"_\" + str(band_num)\n bands[\"no_data_value\"] = srcband.GetNoDataValue()\n bands[\"scale\"] = srcband.GetScale()\n bands[\"color_table\"] = (None\n if not srcband.GetRasterColorTable() else True)\n bands[\"url\"] = None\n bands[\"statistics\"] = OrderedDict(\n zip(\n [\"minimum\", \"maximum\", \"mean\", \"stddev\"],\n srcband.GetStatistics(True, False),\n ))\n resource_pk.append(bands)\n return resource_pk[0]", "def prepare(self):\n os.mkdir(self.out_dir)\n os.mkdir(self.batch_dir)\n\n # compute mapping from id to file\n for file in self.files:\n file_name = os.path.basename(file)\n file_id = int(file_name.split('.')[0])\n self.mapping_id_file[file_id] = file\n self.id_set.add(file_id)", "def create_groups ():\n group_list = ['Cores', 'Coords', 'Vols',]\n for group_name in group_list:\n create_group (group_name)", "def _make_rule(name, mode, resources, destination):\n for resource in resources:\n resource['applies_to'] = ('self'\n if resource['type'] == 'project' else 'children')\n return {\n 'name': name,\n 'mode': mode,\n 'resource': resources,\n 'sink': {\n 'destination': destination,\n 'filter': _SINK_RULE_FILTER,\n 'include_children': '*',\n },\n }", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def create_rg_dict(self, bam_in, rg_dict_out_files, rg_prefix=False):\n if rg_prefix:\n sampleID = os.path.basename(bam_in).rstrip(\".input.bam\")\n prefix_string = \"--prefix %s\" % (sampleID)\n else:\n prefix_string = \"\"\n\n self.cmd(\"python {readgroup_mover} create\\\n {prefix_string} \\\n --input {bam_in}\\\n --output {dict_out}\"\n .format(\n readgroup_mover=self.cmds[\"readgroup_mover\"],\n prefix_string=prefix_string,\n bam_in=bam_in,\n dict_out=rg_dict_out_files[0]\n ),\n on_error=lambda: self.create_error_file(rg_dict_out_files[0]),\n shell=True)\n\n self.checkpoint(rg_dict_out_files[0])\n self.checkpoint(rg_dict_out_files[1])\n self.checkpoint(rg_dict_out_files[2])", "def dict_from_id(x: str) -> dict: # noqa: C901\n obj = 
copy.deepcopy(RESOURCE_METADATA[x])\n obj[\"name\"] = x\n schema = obj[\"schema\"]\n # Expand fields\n if \"fields\" in schema:\n fields = []\n for name in schema[\"fields\"]:\n # Lookup field by name\n value = Field.dict_from_id(name)\n # Update with any custom group-level metadata\n namespace = obj.get(\"field_namespace\")\n if name in FIELD_METADATA_BY_GROUP.get(namespace, {}):\n value = {**value, **FIELD_METADATA_BY_GROUP[namespace][name]}\n # Update with any custom resource-level metadata\n if name in FIELD_METADATA_BY_RESOURCE.get(x, {}):\n value = {**value, **FIELD_METADATA_BY_RESOURCE[x][name]}\n fields.append(value)\n schema[\"fields\"] = fields\n # Expand sources\n sources = obj.get(\"sources\", [])\n obj[\"sources\"] = [\n DataSource.from_id(value) for value in sources if value in SOURCES\n ]\n encoder = obj.get(\"encoder\", None)\n obj[\"encoder\"] = encoder\n # Expand licenses (assign CC-BY-4.0 by default)\n licenses = obj.get(\"licenses\", [\"cc-by-4.0\"])\n obj[\"licenses\"] = [License.dict_from_id(value) for value in licenses]\n # Lookup and insert contributors\n if \"contributors\" in schema:\n raise ValueError(\"Resource metadata contains explicit contributors\")\n contributors = []\n for source in sources:\n if source in SOURCES:\n contributors.extend(DataSource.from_id(source).contributors)\n obj[\"contributors\"] = set(contributors)\n # Lookup and insert keywords\n if \"keywords\" in schema:\n raise ValueError(\"Resource metadata contains explicit keywords\")\n keywords = []\n for source in sources:\n if source in SOURCES:\n keywords.extend(DataSource.from_id(source).keywords)\n obj[\"keywords\"] = sorted(set(keywords))\n # Insert foreign keys\n if \"foreign_keys\" in schema:\n raise ValueError(\"Resource metadata contains explicit foreign keys\")\n schema[\"foreign_keys\"] = FOREIGN_KEYS.get(x, [])\n # Delete foreign key rules\n if \"foreign_key_rules\" in schema:\n del schema[\"foreign_key_rules\"]\n\n # Add encoders to columns as appropriate, based on FKs.\n # Foreign key relationships determine the set of codes to use\n for fk in obj[\"schema\"][\"foreign_keys\"]:\n # Only referenced tables with an associated encoder indicate\n # that the column we're looking at should have an encoder\n # attached to it. 
All of these FK relationships must have simple\n # single-column keys.\n encoder = Encoder.dict_from_id(fk[\"reference\"][\"resource\"])\n if len(fk[\"fields\"]) != 1 and encoder:\n raise ValueError(\n \"Encoder for table with a composite primary key: \"\n f\"{fk['reference']['resource']}\"\n )\n if len(fk[\"fields\"]) == 1 and encoder:\n # fk[\"fields\"] is a one element list, get the one element:\n field = fk[\"fields\"][0]\n for f in obj[\"schema\"][\"fields\"]:\n if f[\"name\"] == field:\n f[\"encoder\"] = encoder\n break\n\n return obj", "def create_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n name = args['name']\n description = args.get('description')\n\n raw_response = client.create_intrusion_rule_group(\n name=name,\n description=description,\n )\n\n return parse_results(\n raw_response=raw_response,\n command_headers_by_keys=INTRUSION_RULE_GROUP_HEADERS_BY_KEYS,\n command_title=f'Created {INTRUSION_RULE_GROUP_TITLE}',\n command_context=INTRUSION_RULE_GROUP_CONTEXT,\n )", "def add_group_file(self, filename):\n with open(filename, \"r\") as handle:\n group_dict = json.load(handle)\n return self.add_group(group_dict)", "def create_input_files(self, datasets_dict):\n ifname = self.keywords['inputfile']\n dirstem = os.path.dirname(ifname)\n basename = os.path.basename(ifname).split('.')[0]\n createdfiles=list()\n if dirstem == \"\":\n dirstem = os.getcwd()\n dkeys = datasets_dict.keys()\n dkeys.sort()\n dct=1\n for didx in dkeys:\n newfile = MASTFile()\n newfile.data = list(datasets_dict[didx])\n newname=\"%s/loop_%s_%s.inp\" % (dirstem, basename, str(dct).zfill(2))\n newfile.to_file(newname)\n #createdfiles.append(os.path.basename(newname))\n createdfiles.append(newname)\n dct=dct+1\n return createdfiles", "def defineResources():\n # Read the resources.cfg file and add all resource locations in it\n cf = ogre.ConfigFile()\n cf.load(\"resources.cfg\")\n seci = cf.getSectionIterator()\n while seci.hasMoreElements():\n secName = seci.peekNextKey()\n settings = seci.getNext()\n\n for item in settings:\n typeName = item.key\n archName = item.value\n ogre.ResourceGroupManager.getSingleton().addResourceLocation(\n archName, typeName, secName)", "def create(self):\n\n if len(self.filenames) != len(self.download_links):\n print(\"Must have the same amount off file names than download links\", file=sys.stderr)\n return None\n\n resources = []\n\n #Creating the resource dict\n for i in range(len(self.filenames)):\n resources.append(\n {\n \"id\": self.ids[i],\n \"description\":\"\",\n \"filename\":self.filenames[i],\n \"download_link\":self.download_links[i]\n }\n )\n\n\n #The JSON\n data = {\n \"dataset\":{\n \"project\":self.project,\n \"version\":self.version,\n \"description\":self.description,\n \"project_link\":self.project_link,\n \"data_path\": self.data_path,\n \"metadata\": self.metadata,\n \"files_type\":self.file_type,\n \"protocole\":self.protocole,\n \"resources\":resources,\n \"data_representation\":self.data_representation\n }\n }\n with open(self.dataset_path, \"w\") as json_file:\n json_file.write(json.dumps(data))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write resource file or resource file group to an output destination. Examples
def write_output(self, resource: _resource.Resource, dest: str):
    if not isinstance(resource, _resource.Resource):
        raise BatchException(f"'write_output' only accepts Resource inputs. Found '{type(resource)}'.")
    if (isinstance(resource, _resource.JobResourceFile)
            and isinstance(resource._source, job.BashJob)
            and resource not in resource._source._mentioned):
        name = resource._source._resources_inverse[resource]
        raise BatchException(f"undefined resource '{name}'\n"
                             f"Hint: resources must be defined within the "
                             f"job methods 'command' or 'declare_resource_group'")
    if (isinstance(resource, _resource.PythonResult)
            and isinstance(resource._source, job.PythonJob)
            and resource not in resource._source._mentioned):
        name = resource._source._resources_inverse[resource]
        raise BatchException(f"undefined resource '{name}'\n"
                             f"Hint: resources must be bound as a result "
                             f"using the PythonJob 'call' method")
    if isinstance(self._backend, _backend.LocalBackend):
        dest_scheme = url_scheme(dest)
        if dest_scheme == '':
            dest = os.path.abspath(os.path.expanduser(dest))
    resource._add_output_path(dest)
[ "def write_resources(self, resources):\n for filename, data in list(resources.get('outputs', {}).items()):\n # Determine where to write the file to\n dest = os.path.join(self.output_dir, filename)\n path = os.path.dirname(dest)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n # Write file\n with open(dest, 'wb') as f:\n f.write(data)", "def WriteToFile(output_file, security_policy, file_format):\n resource_printer.Print(\n security_policy, print_format=file_format, out=output_file)", "def testWriteResourceFiles(self):\n resource_files = ['test.rc']\n\n file_writer = writers.VS2010ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteResourceFiles(resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b' <ItemGroup>\\r\\n'\n b' <ResourceCompile Include=\"test.rc\" />\\r\\n'\n b' </ItemGroup>\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def write(self, destination):\n if not self._errors and not self._warnings:\n return\n destination.write('%s :\\n' % self._asset_name)\n for error in self._errors:\n destination.write(' * ERROR : %s\\n' % error)\n for warning in self._warnings:\n destination.write(' * WARNING : %s\\n' % warning)\n\n destination.write('\\n')", "def writeAPI(res_api, dst_file):\n with open(dst_file, 'w') as api_dst:\n api_dst.write(res_api)", "def write_resource_file(model, outfile):\n\t#Species types are a specific metabolite/enzyme... ignoring localization. \n\t#This framework also includes the references tying the internal names to\n\t#external standardized metabolite and yeast protein names\n\t\n\toutf = open(outfile, \"w\")\n\tspeciesT = model.getListOfSpecies()\n\t\n\t\n\tcount = 0\n\tfor ele in speciesT:\n\t\t\n\t\tcount += 1\n\t\t\n\t\tannotation = ele.getAnnotation()\n\t\t\n\t\tif annotation == None:\n\t\t\tprint \"No annotation:\", ele.getName()\n\t\t\tcontinue\n\t\t\n\t\tchildren = get_chain_down(annotation, \"RDF\", \"Description\", None, \"Bag\", \"li\")\n\t\t\n\t\tfor c in children:\t\t\t\n\t\t\tret = \"\\t\".join((ele.getId(), ele.getName(),\n\t\t\t\t\t\t\t c.getAttributes().getValue(\"resource\")))\n\t\t\toutf.write(ret + \"\\n\")\n\t\t\n\t\t#if count > 10:\n\t#\t\tbreak\n\toutf.close()", "def collect_helper(output_dir, cmd, file_name, resource_name, namespace=None):\n return_code, out = run_shell_command(cmd)\n if return_code:\n logger.warning(\"Error when running %s: %s\", cmd, out)\n return\n path = os.path.join(output_dir, file_name)\n with open(path, \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(out)\n logger.info(\"Namespace '%s': Collected %s\", namespace, resource_name)", "def testWriteResourceFiles(self):\n resource_files = ['test.rc']\n\n file_writer = writers.VS2008ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteResourceFiles(resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b'\\t\\t<Filter\\r\\n'\n b'\\t\\t\\tName=\"Resource Files\"\\r\\n'\n b'\\t\\t\\tFilter=\"rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;'\n b'resx;tiff;tif;png;wav\"\\r\\n'\n b'\\t\\t\\tUniqueIdentifier=\"{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}\"\\r\\n'\n b'\\t\\t\\t>\\r\\n'\n b'\\t\\t\\t<File\\r\\n'\n b'\\t\\t\\t\\tRelativePath=\"test.rc\"\\r\\n'\n b'\\t\\t\\t\\t>\\r\\n'\n b'\\t\\t\\t</File>\\r\\n'\n b'\\t\\t</Filter>\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def _write(self, *args, 
**kwargs):\n raise NotImplementedError('Writing OUTCAR files is not supported.')", "def write(task_spec: TaskSpec, destination_dir: Path, force: bool = False):\n\n file_path: Path = destination_dir / task_spec.filename\n file_path.touch(exist_ok=force)\n\n writable_task_spec: Dict = clean(task_spec)\n\n yaml.dump(writable_task_spec, file_path)", "def write(self, data, dst, label=None, mode='wb'):\n\n self._tag(dst, label)\n self._mkdir_for(dst)\n with open(os.path.join(self.chroot, dst), mode) as wp:\n wp.write(data)", "def write(self, data, dst, label=None, mode='wb'):\r\n\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n with open(os.path.join(self.chroot, dst), mode) as wp:\r\n wp.write(data)", "async def dump(self, resource: ResourceType, location: PhysicalResourceLocation):\n try:\n # Serialize the resource.\n raw = self.serializer(resource)\n # Make sure the target directory exists.\n location.path.parent.mkdir(parents=True, exist_ok=True)\n # Dump the raw data to file.\n await self._dump_raw(raw, location)\n except ResourceDumperError:\n raise\n except Exception as ex:\n raise FailedToDumpResourceError(location.path) from ex", "def write_file(rel_path, text, *args, **kwargs):\n path = os.path.join(os.path.dirname(__file__), \"resources\", rel_path)\n with open(path, 'w+', *args, **kwargs) as _file:\n _file.write(text)", "def testWrite(self):\n contents = 'test contents'\n test_file = 'test.txt'\n writer = output_manager.LocalOutputWriter(\n base_output_dir=self.base_output_dir, unique_dir='unique_dir')\n output_dir = writer.create_output_dir()\n self.remove_dirs.append(output_dir)\n src = os.path.join(self.base_output_dir, test_file)\n dst = os.path.join(output_dir, test_file)\n self.remove_files.append(src)\n self.remove_files.append(dst)\n with open(src, 'w') as file_handle:\n file_handle.write(contents)\n\n self.assertTrue(writer.copy_to(src))\n self.assertTrue(os.path.exists(dst))\n self.assertEqual(contents, open(dst).read())", "def write(self):\n self.output_directory.mkdir(parents=True, exist_ok=True)\n parameter_set_files = [pathlib.Path(set_name) for set_name in\n self.parameter_study.coords[_set_coordinate_key].values]\n if self.write_meta and self.provided_output_file_template:\n self._write_meta(parameter_set_files)\n if self.output_file_type == 'h5':\n self._write_dataset()\n elif self.output_file_type == 'yaml':\n self._write_yaml(parameter_set_files)\n else:\n raise ValueError(f\"Unsupported output file type '{self.output_file_type}'\")", "def write_pds4(\r\n self,\r\n output_directory,\r\n write_product_files=True,\r\n write_label_file=True,\r\n ):\r\n\r\n if self.mode == 'target':\r\n if self.band_name == 'thermal':\r\n self.output_file_1 = self.basename + '_thermal_250.jpg'\r\n else:\r\n self.output_file_1 = self.basename + '_albedo_46.jpg'\r\n self.output_file_2 = self.basename + '_albedo_84.jpg'\r\n self.output_file_3 = self.basename + '_albedo_105.jpg'\r\n\r\n else:\r\n if self.band_name == 'thermal':\r\n self.output_file_1 = self.basename + '_thermal_84.jpg'\r\n else:\r\n self.output_file_1 = self.basename + '_albedo_46.jpg'\r\n\r\n if write_product_files:\r\n print(\"Copying browse images to \" + output_directory)\r\n shutil.copyfile(\r\n self.filename,\r\n output_directory + self.output_file_1\r\n )\r\n for file_pair in [\r\n [self.second_image_path, self.output_file_2],\r\n [self.third_image_path, self.output_file_3]\r\n ]:\r\n if file_pair[1] is None:\r\n continue\r\n shutil.copyfile(\r\n file_pair[0],\r\n output_directory + file_pair[1]\r\n )\r\n 
self.write_label(\r\n output_directory,\r\n write_label_file,\r\n label_name=self.basename + '_' + self.band_name\r\n )", "def print_to(output_dest, msg):\n if output_dest in special_dest:\n special_dest[output_dest](msg)\n return\n\n with open(output_dest, \"a\", encoding=\"utf-8\") as output_stream:\n output_stream.write(msg)", "def _write(self):\n # Make sure it's group writable\n fp = open(self._claimfile, 'w')\n try:\n fp.write(self._claimfile)\n finally:\n fp.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select all jobs in the batch whose name matches `pattern`. Examples
def select_jobs(self, pattern: str) -> List[job.Job]:
    return [job for job in self._jobs
            if job.name is not None and re.match(pattern, job.name) is not None]
[ "def filter_by_name(self, pattern):\n return [t for t in self if pattern in t.name]", "def filter_jobs(jobs, keyword):\n for job in jobs:\n if keyword == \"all\":\n yield job\n elif job[\"name\"].find(keyword) != -1:\n yield job", "def get_build_name_by_pattern(self, pattern=\"client\", namespace=None, filter=None):\n ocp_obj = OCP(kind=\"Build\", namespace=namespace)\n build_names = ocp_obj.exec_oc_cmd(\"get build -o name\", out_yaml_format=False)\n build_names = build_names.split(\"\\n\")\n build_list = []\n for name in build_names:\n if filter is not None and re.search(filter, name):\n log.info(f\"build name filtered {name}\")\n elif re.search(pattern, name):\n (_, name) = name.split(\"/\")\n log.info(f\"build name match found appending {name}\")\n build_list.append(name)\n return build_list", "def search_terms_by_name(self, pattern):\n\n ids_found = filter(lambda tid: pattern in self.G.node[tid]['name'],\n self.G.nodes())\n terms_found = [self.get_term(i) for i in sorted(ids_found)]\n return terms_found", "def filter_by_regexp(self, job_list, regexp, attribute='jobname'):\n # I know this could be written in one line but it would be\n # quite harder to read.\n matching_jobs = []\n for job in job_list:\n try:\n attrvalue = getattr(job, attribute)\n except:\n continue\n if regexp.match(attrvalue):\n matching_jobs.append(job)\n return matching_jobs", "def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)", "def do_builds(self, pattern):\n if not pattern:\n print('\\n'.join(self._qm.get_available_stc_builds()))\n return\n\n for build in self._qm.get_available_stc_builds():\n if fnmatch.fnmatch(build, pattern):\n print(build)", "def grep(requestContext, seriesList, pattern):\n regex = re.compile(pattern)\n return [s for s in seriesList if regex.search(s.name)]", "def _get_wild_tasks(self, pattern):\n wild_list = []\n for t_name in self._def_order:\n if fnmatch.fnmatch(t_name, pattern):\n wild_list.append(t_name)\n return wild_list", "def search(self, pattern, return_match=False, **kwargs):\n try: # Python 2\n if isinstance(pattern, basestring):\n pattern = re.compile(pattern)\n except NameError: # Python 3\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n if len(kwargs) == 0:\n labels = self._list\n else:\n labels = self.tslice(**kwargs)\n if return_match:\n return [(l,m) \\\n for l in labels \\\n # TODO: verify that *not* encoding is the correct thing to do\n# for m in [pattern.search(l.text.encode(l.codec))] \\\n for m in [pattern.search(l.text)] \\\n if m]\n else:\n return [l \\\n for l in labels \\\n # TODO: verify that *not* encoding is the correct thing to do\n# if pattern.search(l.text.encode(l.codec))]\n if pattern.search(l.text)]", "def grep(pattern):\n for record in search(pattern):\n print(record)", "def filter_job_list(jobs, role, env, name):\n return [job for job in jobs if fnmatch(job.role, role) and fnmatch(job.env, env)\n and fnmatch(job.name, name)]", "def match(self, pattern):\n import fnmatch\n import re\n\n p = re.compile(fnmatch.translate(pattern))\n names = []\n for name in self._names:\n if p.match(name):\n names.append(name)\n return names", "def queue(self, pattern=None):\n for uuid, d in self.d.items():\n if pattern and dlite.globmatch(pattern, d['meta']):\n continue\n yield uuid", "def Grep(args, callback):\r\n assert len(args) >= 2\r\n pattern = re.compile(args[0])\r\n files = args[1:]\r\n\r\n bucket = store = None\r\n for f in files:\r\n # 
Parse file name and extract bucket and relative path.\r\n resolved = store_utils.ParseFullPath(f)\r\n assert resolved is not None, 'Cannot determine bucket from %s' % f\r\n b, path = resolved\r\n assert bucket is None or bucket == b, 'Input files must all be in the same bucket'\r\n\r\n if store is None:\r\n # Initialize ObjectStore for this bucket.\r\n bucket = b\r\n store = ObjectStore.GetInstance(bucket)\r\n\r\n # Read file and iterate over each line.\r\n contents = yield gen.Task(store_utils.GetFileContents, store, path)\r\n for line in contents.split('\\n'):\r\n if pattern.search(line):\r\n print '%s:%s' % (f, line)\r\n\r\n callback()", "def search(self, pattern):\n raise NotImplementedError()", "def resources_with(self, pattern: Pattern) -> List[str]:\n return [\n resource for resource in self.get_resource_list() if isinstance(resource, str) and pattern.match(resource)\n ]", "def scan_for_query(seq,searchpattern):\r\n query_matches = []\r\n for i in range(seq.length-len(searchpattern)):\r\n if(seq.body[i:i+len(searchpattern)]==searchpattern):\r\n query_matches.append(i)\r\n return query_matches", "def search(self, pattern=\"*\", mode=\"both\"):\n pattern = self._glob_to_sql(pattern)\n\n COND = \"(keyword.name like ? OR keyword.doc like ?)\"\n args = [pattern, pattern]\n if mode == \"name\":\n COND = \"(keyword.name like ?)\"\n args = [pattern,]\n\n sql = \"\"\"SELECT collection.collection_id, collection.name, keyword.name, keyword.doc\n FROM collection_table as collection\n JOIN keyword_table as keyword\n WHERE collection.collection_id == keyword.collection_id\n AND %s\n ORDER by collection.collection_id, collection.name, keyword.name\n \"\"\" % COND\n\n cursor = self._execute(sql, args)\n result = [(row[0], row[1], row[2], row[3].strip().split(\"\\n\")[0])\n for row in cursor.fetchall()]\n return list(set(result))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes querysets for keyword and headlinekeyword
def __init__(self):
    self.keyword_queryset = Keyword.objects.all()
    self.headlinekeyword_queryset = Headlinekeyword.objects.all()
    self.headline_queryset = Headline.objects.all()
[ "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def setup_eager_loading(cls, queryset):\n queryset = queryset.prefetch_related('keywords_str')\n queryset = queryset.prefetch_related('tags_str')\n # queryset = queryset.prefetch_related('keywords')\n # queryset = queryset.prefetch_related('tags')\n return queryset", "def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)", "def reset_search_keywords_and_set(self, keywords):\n pass", "def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if str(keyword) in str(self.text):\n self.keywords = []", "def init_by_keys(cls, **query):\n raise NotImplementedError()", "def _init_query(self):\n print('Initializing query for %s ...' %self.hashtag)\n init = False\n self.switch = 0\n self.api = self.api_list[self.switch]\n self.remaining_req = self._get_remaining_req()\n self.tweet_count = 0\n self.id_list=[]\n self.first_date = self.until_date\n self.kwargs = {\"q\": self.hashtag, \"count\": self.n_tweets_batch,\n \"since\" : self.since_date,\n \"tweet_mode\" : 'extended', \"full_text\" : True,\n \"wait_on_rate_limit\":True,\n \"wait_on_rate_limit_notify\":True}\n print('Using auth %s' %self.api)\n\n return self", "def get_queryset(self):\n queryset = None \n query = self.request.QUERY_PARAMS.get('q', None)\n\n queryset = SearchQuerySet()\n if query == None:\n queryset = queryset.order_by('price')\n else: \n queryset = queryset.filter(content = Clean(query)).order_by('price')\n\n return queryset", "def pre_search(self, qs):\n return qs", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def set_keyword(self):\n self.obj_keyword, _ = Keyword.objects.get_or_create(keyword=self.keyword)\n\n 
existed_article_txid = set(self.article_txid_list) - set(self.cleand_txid_list)\n existed_article = [Article.objects.get(article_txid=article_txid) for article_txid in existed_article_txid]\n\n for article in existed_article:\n # 이미 존재하는 아티클에 키워드가 추가되어있지 않다면 키워드를 추가한다.\n if not article.keyword.filter(keyword=self.keyword).exists():\n article.keyword.add(self.obj_keyword)\n article.save()", "def keywords(self, keywords):\n self._keywords = keywords", "def keywords(self, keywords):\n\n self._keywords = keywords", "def defaultKeywords(self, kwSet):\n return QsciLexerJava.keywords(self, kwSet)", "def set_queries(self, **kwargs):\n for k, v in kwargs.items():\n self._query_dict[k] = v", "def keywords(self):\n from hubspot3.keywords import KeywordsClient\n\n return KeywordsClient(**self.auth, **self.options)", "def clearkeywords(self):\n self._kw = []", "def initialize_survey(self, **kwargs):", "def set_keyword_map(self):\n \n ret = defaultdict(list)\n for idx, doc in enumerate(self.docs):\n for token in doc:\n if token in self.dictionary.token2id:\n ret[token].append(idx)\n \n self.keyword_map = ret\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dictionary of the keywords and the list of corresponding headlines (ids only)
def keyword_headlines(self):
    d = {}
    for q in self.keyword_queryset:
        d[q.content] = self.headlinekeyword_queryset.filter(keywordid=q.id)
    return d
[ "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)", "def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()", "def get_headlines_q(cls, *args):\n top_headlines = {}\n\n print(\"Retrieving top headlines ...\")\n # loop through keywords\n for q in args:\n url = f'https://newsapi.org/v2/top-headlines?q={q}&apiKey={NEWS_API_KEY}'\n\n print(f\"Retrieving top headlines with keyword {q} ...\")\n response = requests.get(url)\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n return f\"Error: {e}\"\n\n r_json = response.json()\n articles_pd = cls.convert_to_df(r_json)\n\n # add top headlines to directory\n # key: keywords\n # value: top headlines\n top_headlines[q] = articles_pd\n\n return top_headlines", "def _getKeywordsForBlogEntries(self):\n entries = self.getAllEntries()\n # Use dict rather than list to avoid duplicates\n keywords = {}\n for entry in entries:\n for kw in entry.Subject:\n keywords[kw] = None\n keys = keywords.keys()\n keys.sort()\n return keys", "def get_keyword_list(self):\n return self.keywords", "def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords", "def getkeylist(data, keywords):\n\n datadict = {}\n for n in keywords:\n freq = getkeyfrequency(data, n)\n datadict[n] = freq\n return datadict", "def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]", "def keywords(self):\n return {\n \"unary\": {\n k: v[0] for k, v in self.unary_commands.items()\n },\n \"terminal\": {\n k: v[0] for k, v in self.terminal_commands.items()\n },\n \"binary\": {\n k: v[0] for k, v in self.binary_commands.items()\n },\n }", "def keyword_extraction(file_content):\n\n # [question, question....]\n for key, value in file_content.items():\n seg, hidden = ltp.seg([key])\n # ner: [[('Nh', 2, 2)]]\n ner = ltp.ner(hidden)\n # keywords: [('PERSON', \"吴轩\")], tuple_item: ('Nh', 2, 2)\n keywords = [(tag_to_name[tuple_item[0]], to_string(seg[0][tuple_item[1]: tuple_item[2]+1])) for tuple_item in ner[0]]\n file_content[key].keywords = keywords\n\n return file_content", "def parse_keywords(medline):\n keyword_list = medline.find(\"KeywordList\")\n keywords = list()\n if keyword_list is not None:\n for k in keyword_list.findall(\"Keyword\"):\n if k.text is not None:\n keywords.append(k.text)\n keywords = \"; \".join(keywords)\n else:\n keywords = \"\"\n return keywords", "def determine_keywords(self):\n\n split = dict()\n split['email_cc'] = re.compile(\"^\\s*CC[-_]?MAIL[:=]\\s*(.*)\")\n split['email_cc2'] = re.compile(\"^\\s*C[Cc][:=]\\s*(.*)\")\n split['fixed_in'] = re.compile(\"^\\s*FIXED[-_]?IN[:=]\\s*(.*)\")\n\n numeric = dict()\n numeric['bug_fixed'] = re.compile(\"^\\s*(?:BUGS?|FEATURE)[:=]\\s*(.+)\")\n numeric['bug_cc'] = re.compile(\"^\\s*CCBUGS?[:=]\\s*(.+)\")\n\n presence = dict()\n presence['email_gui'] = re.compile(\"^\\s*GUI:\")\n presence['silent'] = re.compile(\"(?:CVS|SVN|GIT|SCM).?SILENT\")\n presence['notes'] = re.compile(\"(?:Notes added by 'git notes 
add'|Notes removed by 'git notes remove')\")\n\n results = defaultdict(list)\n for line in self.commit.message.split(\"\\n\"):\n # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off\n # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them\n line = re.sub(\"^Summary: (.+)\", \"\\g<1>\", line)\n\n # Start processing our keywords...\n for (name, regex) in split.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += [result.strip() for result in match.group(1).split(\",\")]\n\n for (name, regex) in numeric.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += re.findall(\"(\\d{1,10})\", match.group(1))\n\n for (name, regex) in presence.iteritems():\n if re.match( regex, line ):\n results[name] = True\n\n self.keywords = results", "def keywords(self):\n kws = self._filter_entities((SimpleKeyword, FactorKeyword))\n return sorted_dict(kws)", "def _get_keywords(source)->list:\n data = json.loads(source)\n return list(data['keywords'])", "def get_keyword_list(self):\n return self.__keyword_list", "def get_entities_dict(p_str):\n nlp = en_core_web_sm.load()\n doc = nlp(p_str)\n entities = {}\n relevant_keywords = []\n list_of_types = ['NORP', 'ORG', 'GPE', 'LAW', 'LANGUAGE']\n for X in doc.ents:\n if not(X.label_ in entities):\n entities[X.label_] = []\n entities[X.label_].append(X.text)\n if X.label_ in list_of_types:\n relevant_keywords.append(X.text)\n print(entities)\n print(\"HERE\")\n print(relevant_keywords)\n return entities, relevant_keywords", "def articles_id_headwords (_id):\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE no = :id\n ORDER BY sortkeyword\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'id' : _id, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def _get_keywords(self, title: str):\n # Prepare data\n keywords = set()\n stops = set(nltk.corpus.stopwords.words(\"english\"))\n stemmer = nltk.stem.SnowballStemmer(\"english\")\n ent_types = [\n \"PERSON\", \"ORGANIZATION\", \"FACILITY\", \"LOCATION\", \"DATE\",\n \"TIME\", \"GPE\", \"MONEY\",\n ]\n excluded_word_types = [\"RB\", \"IN\", \"PRP\"]\n\n # Tokenize and chunk words using NLTK\n tokens = nltk.tokenize.word_tokenize(title)\n positions = nltk.pos_tag(tokens)\n chunk = nltk.ne_chunk(positions)\n\n # Make a word list of keywords we want to add, that\n # are not part of our excluded word types.\n words = set()\n for pos in positions:\n word, word_type = pos\n if word.isalnum() and word_type not in excluded_word_types:\n words.add(word)\n\n # Add all entities to keyword list and remove them from\n # our remaining word set so they don't get added again\n # and stemmed later.\n for subtree in chunk.subtrees(filter=lambda t: t.label() in ent_types):\n for leaf in subtree.leaves():\n keywords.add(leaf[0])\n if leaf[0] in words:\n words.remove(leaf[0])\n\n # Add remaining words in list and stem them to base form,\n # stemming means we change words from e.g. \"eating\" to \"eat\".\n for word in words:\n if word not in stops:\n keywords.add(stemmer.stem(word))\n\n return sorted([keyword.lower() for keyword in keywords])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of keywords
def get_keywords(self):
    return list(self.keyword_headlines().keys())
[ "def get_keyword_list(self):\n return self.keywords", "def get_keyword_list(self):\n return self.__keyword_list", "def keywords(self):\n return self._keywords", "def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords", "def get_keywords(self):\n\n if str(self.keywords) == \"unset\": return []\n # if self.keywords: return self.keywords\n if len(self.keywords) > 0: return self.keywords\n # retrieve from args and return if exists\n keywords = Settings.get_keywords() or []\n if len(keywords) > 0: return keywords\n if not Settings.prompt(\"keywords\"):\n self.keywords = \"unset\" # used to skip prompting for value in future\n return []\n question = {\n 'type': 'input',\n 'name': 'keywords',\n 'message': 'Keywords:',\n 'validate': ListValidator\n }\n keywords = prompt(question)[\"keywords\"]\n keywords = [n.strip() for n in keywords.split(\",\")]\n # confirm keywords\n if not Settings.confirm(keywords): return self.get_keywords()\n self.keywords = keywords\n return self.keywords", "def keywords(self):\n return self._pyfuncitem.keywords", "def get_keyword_names(keywords):\n\n keyword_names = [keyword['Name'] for keyword in keywords]\n return keyword_names", "def keywords(self):\n kws = self._filter_entities((SimpleKeyword, FactorKeyword))\n return sorted_dict(kws)", "def _get_keywords(source)->list:\n data = json.loads(source)\n return list(data['keywords'])", "def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]:\n keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)]\n\n return keywords", "def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]", "def extract_keywords(query):\n nlp_server_response = __post_request_nlpserver(extract_special_characters(query))\n keywords = []\n\n for sentence in nlp_server_response['sentences']:\n for token in sentence['tokens']:\n if token['pos'] in {'NN', 'JJ', 'NNP', 'NNS', 'NNPS', 'VB', 'VBN', 'VBZ', 'VBP', 'VBG'}:\n if not token[\"lemma\"].lower() in english_stopwords:\n if not token['lemma'] in {'be', 'have'}:\n keywords.append(token['lemma'])\n return keywords", "def keywords(text:str) -> list:\n return sorted(set(text.split(' ')), key=frequency, reverse=True)[0:5]", "def keywords(self, **kwargs):\n\n path = self._get_movie_id_path('keywords')\n resp = self._get_method(path, kwargs)\n return resp", "def getKeywords(tmdbKeywords):\n \n words = []\n if \"keywords\" in tmdbKeywords:\n for keyword in tmdbKeywords[\"keywords\"]:\n words += _format(keyword[\"name\"]).split()\n else:\n raise AttributeError(\"%s instance has no attribute keywords\" % tmdbKeywords) \n return words", "def keywords(self):\n if self._desktop:\n keywords = self.get_field('Keywords')\n if keywords:\n return d_('app-install-data',\n keywords).rstrip(';').lower().split(';')\n return []", "def keywords(self):\n defined_keywords = [\n ('allowempty_map', 'allowempty_map'),\n ('assertion', 'assertion'),\n ('default', 'default'),\n ('class', 'class'),\n ('desc', 'desc'),\n ('enum', 'enum'),\n ('example', 'example'),\n ('extensions', 'extensions'),\n ('format', 'format'),\n ('func', 'func'),\n ('ident', 'ident'),\n ('include_name', 'include'),\n ('length', 'length'),\n ('map_regex_rule', 'map_regex_rule'),\n ('mapping', 'mapping'),\n ('matching', 'matching'),\n ('matching_rule', 'matching_rule'),\n ('name', 'name'),\n ('nullable', 
'nullable'),\n ('parent', 'parent'),\n ('pattern', 'pattern'),\n ('pattern_regexp', 'pattern_regexp'),\n ('range', 'range'),\n ('regex_mappings', 'regex_mappings'),\n ('required', 'required'),\n ('schema', 'schema'),\n ('schema_str', 'schema_str'),\n ('sequence', 'sequence'),\n ('type', 'type'),\n ('type_class', 'type_class'),\n ('unique', 'unique'),\n ('version', 'version'),\n ]\n found_keywords = []\n\n for var_name, keyword_name in defined_keywords:\n if getattr(self, var_name, None):\n found_keywords.append(keyword_name)\n\n return found_keywords", "def get_all_keywords(resource):\n keywords = []\n resource.populate()\n for res in [i for i in resource.imports.data if isinstance(i, robot.parsing.settings.Resource)]:\n keyword_file = os.path.abspath('{}/{}'.format(res.directory, res.name))\n if keyword_file not in processed:\n res_obj = ResourceFile(keyword_file)\n processed[keyword_file] = res_obj\n keywords += get_all_keywords(res_obj)\n for keyword in resource.keywords:\n print(keyword.name)\n keywords.append(tuple((keyword.source, keyword.name, keyword.args.value if keyword.args.value else [])))\n return keywords", "def GetKeywordList(self, idx):\n return self._keywords.get(idx, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of headlines if given a keyword
def get_headlines(self, kw=None):
    if kw:
        return self.get_headlines_with_keyword(kw)
    else:
        return self.get_all_headlines()
[ "def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)", "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results", "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def all_headlines_from(url):\n pass", "def get_headlines_from_search_results(filename):\n\n pass", "def get_headlines(url):\n feed = feedparser.parse(url)\n headlines = [x['title'] for x in feed['entries']]\n return headlines", "def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines", "def get_headlines_q(cls, *args):\n top_headlines = {}\n\n print(\"Retrieving top headlines ...\")\n # loop through keywords\n for q in args:\n url = f'https://newsapi.org/v2/top-headlines?q={q}&apiKey={NEWS_API_KEY}'\n\n print(f\"Retrieving top headlines with keyword {q} ...\")\n response = requests.get(url)\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n return f\"Error: {e}\"\n\n r_json = response.json()\n articles_pd = cls.convert_to_df(r_json)\n\n # add top headlines to directory\n # key: keywords\n # value: top headlines\n top_headlines[q] = articles_pd\n\n return top_headlines", "def get_head_phrases(self, phrase):\n # retrieve the head path and begin walking down it\n if len(phrase) == 3:\n head_phrases = list(nt.get_head_path(phrase))\n else:\n head_phrases = [phrase]\n return head_phrases", "def all_headlines(html_root_node):\n pass", "def gather_headlines(urls):\n pass", "def check_if_in_head(name_head, headlines_list):\n if name_head in headlines_list:\n is_headline = 1\n else:\n is_headline = 0\n return is_headline", "def is_headline(node):\n pass", "def get_negative_headlines(headlines,words):\r\n negative_headlines = []\r\n for headline in headlines:\r\n for word in words:\r\n if headline.lower().find(word) != -1: #If particular word is found in lowercased headline.\r\n negative_headlines.append(headline)\r\n break #Stop iterating through words when we have found one negative word.\r\n return negative_headlines", 
"def test_headlines_predefined(self) -> None:\n for headline in self.report.headlines:\n if not self.rules.get_headline_rules(headline.name):\n headlines = [headline.name for headline in self.rules.headlines]\n suggestion, _ = process.extractOne(\n headline.name, headlines, scorer=fuzz.partial_ratio\n )\n self.add_error(\n f\"{headline.name} är inte en valid rubrik. \"\n f\"Rättningsförlsag: {suggestion}.\",\n headline=headline,\n )\n elif re.search(\"\\\\W{1,}\", headline.name, re.I):\n self.add_error(\n f\"Rubriken {headline.name} innehåller tecken som inte är \"\n \"alfanumeriska vilket inte är tillåtet för en rubrik.\",\n headline=headline,\n )", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def get_headlines(issue_description2):\n heads = []\n issue_description = issue_description2\n if issue_description is not None:\n issue_description = issue_description.replace('\\\\n', '\\n')\n issue_description = issue_description.replace('\\\\r', '\\r')\n info_description_changes = issue_description.split('\\n')\n for index, value in enumerate(info_description_changes):\n result = re.search('h1. (.*)\\r', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n result = re.search('h2. (.*)\\r', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n else:\n result = re.search('h2. (.*)', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n result = re.search('h3. (.*)\\r', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n result = re.search('h4. 
(.*)\\r', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n if value is not None:\n if '*story*' in value.lower() or '*story:*' in value.lower():\n heads.append('story')\n if '*acceptance criteria*' in value.lower() or '*acceptance criteria:*' in value.lower():\n heads.append('acceptance criteria')\n if '*requirements*' in value.lower() or '*requirements:*' in value.lower():\n heads.append('requirements')\n if '*definition of done*' in value.lower() or '*definition of done:*' in value.lower():\n heads.append('definition of done')\n if '*design*' in value.lower() or '*design:*' in value.lower():\n heads.append('design')\n if '*stakeholders*' in value.lower() or '*stakeholders:*' in value.lower():\n heads.append('stakeholders')\n if '*review steps*' in value.lower() or '*review steps:*' in value.lower():\n heads.append('review steps')\n if '*questions*' in value.lower() or '*questions:*' in value.lower():\n heads.append('questions')\n if '*implementation notes*' in value.lower() or '*implementation notes:*' in value.lower():\n heads.append('implementation notes')\n if '*notes*' in value.lower() or '*notes:*' in value.lower():\n heads.append('notes')\n\n return heads", "def find(bowl, keyword):\n keywords = bowl.find_all(keyword)\n return [item.get_text(\" \", strip=True) for item in keywords]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of the headlines with the corresponding keyword
def get_headlines_with_keyword(self, kw):
    key_head = self.keyword_headlines()
    headlines = set()
    for headlinekw in key_head[kw]:
        content = headlinekw.headlineid.content
        headlines.add(content)
    return list(headlines)
[ "def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()", "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def get_headlines_from_search_results(filename):\n\n pass", "def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results", "def all_headlines_from(url):\n pass", "def get_headlines_q(cls, *args):\n top_headlines = {}\n\n print(\"Retrieving top headlines ...\")\n # loop through keywords\n for q in args:\n url = f'https://newsapi.org/v2/top-headlines?q={q}&apiKey={NEWS_API_KEY}'\n\n print(f\"Retrieving top headlines with keyword {q} ...\")\n response = requests.get(url)\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n return f\"Error: {e}\"\n\n r_json = response.json()\n articles_pd = cls.convert_to_df(r_json)\n\n # add top headlines to directory\n # key: keywords\n # value: top headlines\n top_headlines[q] = articles_pd\n\n return top_headlines", "def get_headlines(url):\n feed = feedparser.parse(url)\n headlines = [x['title'] for x in feed['entries']]\n return headlines", "def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines", "def get_head_phrases(self, phrase):\n # retrieve the head path and begin walking down it\n if len(phrase) == 3:\n head_phrases = list(nt.get_head_path(phrase))\n else:\n head_phrases = [phrase]\n return head_phrases", "def all_headlines(html_root_node):\n pass", "def gather_headlines(urls):\n pass", "def get_headlines(issue_description2):\n heads = []\n issue_description = issue_description2\n if issue_description is not None:\n issue_description = issue_description.replace('\\\\n', '\\n')\n issue_description = issue_description.replace('\\\\r', '\\r')\n info_description_changes = issue_description.split('\\n')\n for index, value in enumerate(info_description_changes):\n result = re.search('h1. (.*)\\r', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n result = re.search('h2. 
(.*)\\r', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n else:\n result = re.search('h2. (.*)', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n result = re.search('h3. (.*)\\r', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n result = re.search('h4. (.*)\\r', value)\n if result is not None:\n heads.append(clean_text(result.group(1)).replace(\":\", \"\").lower())\n if value is not None:\n if '*story*' in value.lower() or '*story:*' in value.lower():\n heads.append('story')\n if '*acceptance criteria*' in value.lower() or '*acceptance criteria:*' in value.lower():\n heads.append('acceptance criteria')\n if '*requirements*' in value.lower() or '*requirements:*' in value.lower():\n heads.append('requirements')\n if '*definition of done*' in value.lower() or '*definition of done:*' in value.lower():\n heads.append('definition of done')\n if '*design*' in value.lower() or '*design:*' in value.lower():\n heads.append('design')\n if '*stakeholders*' in value.lower() or '*stakeholders:*' in value.lower():\n heads.append('stakeholders')\n if '*review steps*' in value.lower() or '*review steps:*' in value.lower():\n heads.append('review steps')\n if '*questions*' in value.lower() or '*questions:*' in value.lower():\n heads.append('questions')\n if '*implementation notes*' in value.lower() or '*implementation notes:*' in value.lower():\n heads.append('implementation notes')\n if '*notes*' in value.lower() or '*notes:*' in value.lower():\n heads.append('notes')\n\n return heads", "def get_headlines(cls, **context):\n\n # get returned dict from retrieve_sources_from_lang\n ti = context['ti']\n source_info = ti.xcom_pull(task_ids='retrieve_sources_from_lang')\n\n top_headlines = {}\n\n print(\"Retrieving top headlines ...\")\n # loop through source dict\n for source_id, source_name in source_info.items():\n url = f'https://newsapi.org/v2/top-headlines?sources={source_id}&apiKey={NEWS_API_KEY}'\n\n response = requests.get(url)\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n return f\"Error: {e}\"\n\n r_json = response.json()\n articles_pd = cls.convert_to_df(r_json)\n\n # add top headlines to directory\n # key: source names\n # value: top headlines\n top_headlines[source_name] = articles_pd\n\n return top_headlines", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response 
(res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)", "def test_headlines_predefined(self) -> None:\n for headline in self.report.headlines:\n if not self.rules.get_headline_rules(headline.name):\n headlines = [headline.name for headline in self.rules.headlines]\n suggestion, _ = process.extractOne(\n headline.name, headlines, scorer=fuzz.partial_ratio\n )\n self.add_error(\n f\"{headline.name} är inte en valid rubrik. \"\n f\"Rättningsförlsag: {suggestion}.\",\n headline=headline,\n )\n elif re.search(\"\\\\W{1,}\", headline.name, re.I):\n self.add_error(\n f\"Rubriken {headline.name} innehåller tecken som inte är \"\n \"alfanumeriska vilket inte är tillåtet för en rubrik.\",\n headline=headline,\n )", "def get_all_headline_data():\n\twebsites = database.get_website_URLs()\n\tall_headlines_arr = []\n\tfor curr_elt in websites:\n\t\tcurr_website = curr_elt[0]\n\t\tsource = curr_elt[1]\n\t\tcurr_headline_arr = get_headline_data(curr_website, source)\n\t\tall_headlines_arr.append(curr_headline_arr)\n\treturn all_headlines_arr", "def get_keyword_list(self):\n return self.keywords" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the min number of refills to reach 'distance'. You start with a full tank.
def compute_min_refills(distance: int, tank: int, stops: List[int]):
    location: int = 0
    n_stops = 0
    last_stop = 0
    max_drive = location + tank
    while max_drive < distance:
        counter = 0
        # Handle the case that stops are depleted before we reach distance
        if len(stops) == 0:
            return -1
        for s in stops:
            if s <= max_drive:
                counter += 1
                last_stop = s
        max_drive = last_stop + tank
        # Handle the case that we did not reach the next stop
        if counter == 0:
            return -1
        else:
            del stops[0:counter]
        n_stops += 1
    return n_stops
[ "def compute_min_refills(distance: int, tank: int, stops: list):\n previous, current = 0, 0\n positions = [0] + stops + [distance]\n\n num_refills, cur_position = 0, 0\n\n while current <= len(stops):\n previous = current\n\n while current <= len(stops) and (\n positions[current + 1] - positions[previous]\n ) <= tank:\n current += 1\n cur_position = positions[current]\n\n if current == previous:\n return -1 # destination not possible\n\n if cur_position < distance:\n num_refills += 1\n\n return num_refills", "def compute_min_refills(distance, tank, stops):\n\n i = 0\n refills = 0\n stops.append(distance) # add destination as the last stop\n\n while True:\n # print(\"checking stop {}, {}m away\".format(i, stops[i]))\n if stops[i] < tank: # stop is reachable on a full tank\n\n if (stops[i] == stops[-1]): # if this is the last stop, no more refills needed\n return refills\n\n else: # check the next stop\n i += 1\n\n elif stops[i] == tank: # just nice, refill here!\n\n if (stops[i] == stops[-1]): # if this is the last stop, no more refills needed\n return refills\n\n refills += 1\n # print(\"refill at stop {}\".format(i))\n\n distance_travelled = stops[i]\n\n stops = stops[i + 1:] # remove all stops up to the (i+1)th stop\n\n # reset distances between stops (wrt to refill stop)\n stops = [x - distance_travelled for x in stops]\n i = 0 # reset pointer\n\n else: # stop is too far away, refill at previous stop (i-1)\n\n if i == 0:\n return -1 # the first stop is too far away and impossible to reach\n\n refills += 1 # refill at the (i-1)th stop\n # print(\"refill at stop {}\".format(i))\n\n distance_travelled = stops[i - 1]\n stops = stops[i:] # remove all stops up to the ith stop\n\n # reset distances between stops (wrt to refill stop)\n stops = [x - distance_travelled for x in stops]\n\n i = 0 # reset pointer", "def minSkips(self, dist: list[int], speed: int, hoursBefore: int) -> int:\n N = len(dist)\n dp: list[float] = [0] * (N + 1)\n for i, d in enumerate(dist):\n for j in range(N, -1, -1):\n dp[j] += d\n if i < N - 1:\n dp[j] = (dp[j] + speed - 1) // speed * speed\n \n if j:\n dp[j] = min(dp[j], dp[j - 1] + d)\n \n target = speed * hoursBefore\n for i, t in enumerate(dp):\n if t <= target:\n return i\n \n return -1", "def min_soldiers_needed(self, enemy_dist):\n minimum = 100\n my_winning_dist = []\n for way in self.ways_to_win:\n soldiers = 0\n my_dist = []\n for i in range(10):\n if way[i] == 'W':\n soldiers += enemy_dist[i]+1\n my_dist.append(enemy_dist[i]+1)\n elif way[i] == 'T':\n soldiers += enemy_dist[i]\n my_dist.append(enemy_dist[i])\n else:\n my_dist.append(0)\n if soldiers < minimum:\n my_winning_dist = my_dist\n minimum = soldiers\n return minimum, my_winning_dist", "def num_points_in_distance(d):\n return 1 + 3 * d * (d + 1)", "def calc_points_hit(num_transfers, free_transfers):\n if num_transfers == \"W\" or num_transfers == \"F\":\n return 0\n elif isinstance(num_transfers, int):\n return max(0, 4 * (num_transfers - free_transfers))\n elif (num_transfers.startswith(\"B\") or num_transfers.startswith(\"T\")) and len(\n num_transfers\n ) == 2:\n num_transfers = int(num_transfers[-1])\n return max(0, 4 * (num_transfers - free_transfers))\n else:\n raise RuntimeError(\n \"Unexpected argument for num_transfers {}\".format(num_transfers)\n )", "def getMinimumDistancePacmanLand(self, pos):\n minD = 10000\n for p in self.ghostLandPositions:\n minD = min(minD, self.getMazeDistance(pos, p))\n return minD", "def getMinimumDistanceOpponent(self, idx, pos, defense=False):\n minD = 10000\n 
if defense:\n for p in self.beliefs[idx]:\n minD = min(minD, self.getMazeDistanceDefense(pos, p))\n else:\n for p in self.beliefs[idx]:\n minD = min(minD, self.getMazeDistance(pos, p))\n return minD", "def cummulative_distance(self):\n return self.distance.cumsum()", "def part_1(distances: Distances) -> int:\n\n result, _ = min(generate_routes(distances))\n print(f\"part 1: shortest route has distance {result}\")\n return result", "def _calc_min_distance(self, walker):\n\n cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n\n t2 = time.time()\n # make a traj out of it so we can calculate distances through\n # the periodic boundary conditions\n walker_traj = mdj.Trajectory(walker.state['positions'],\n topology=self._mdj_top,\n unitcell_lengths=cell_lengths,\n unitcell_angles=cell_angles)\n\n t3 = time.time()\n # calculate the distances through periodic boundary conditions\n # and get hte minimum distance\n min_distance = np.min(mdj.compute_distances(walker_traj,\n it.product(self.ligand_idxs,\n self.receptor_idxs),\n periodic=self._periodic)\n )\n t4 = time.time()\n logging.info(\"Make a traj: {0}; Calc dists: {1}\".format(t3-t2,t4-t3))\n\n return min_distance", "def distance_tolerance(distance: float) -> float:\n ret = 10.0\n if distance < 0:\n ret += distance * (100 - ret) / -2500.0\n return ret", "def score_waypoint(distance):\n return max(0,\n float(settings.SATISFIED_WAYPOINT_DIST_MAX_FT - distance) /\n settings.SATISFIED_WAYPOINT_DIST_MAX_FT)", "def make_exceeding_distance_cost(allowed_distance):\n\tdef exceeding_distance_cost(node, caller_relative_probabilities, caller_distances):\n\t\treturn np.sum(caller_relative_probabilities[caller_distances > allowed_distance])\n\n\treturn exceeding_distance_cost", "def manhattan_distance(path1, path2):\n # Convert each path into a list of locations it passed through\n trail1 = path_to_trail(path1)\n trail2 = path_to_trail(path2)\n\n # Find all locations that exist in each trail\n collisions = set(trail1) & set(trail2)\n # Origin does not count as an intersection\n collisions -= {(0, 0)}\n # Convert all collisions into distances from origin\n distances = [abs(x) + abs(y) for x, y in collisions]\n return min(distances)", "def get_incremental_distance():\n return current_speed/float(FPS)", "def distance_next(self):\n d = 0\n cur = self\n while True:\n try:\n next = cur.get_next_in_order()\n d += D(m=cur.mark.location.distance(next.mark.location)).nm\n if not next.is_waypoint:\n break\n except CourseMark.DoesNotExist:\n break\n \n if not next.is_waypoint:\n break\n cur = next\n return d or None", "def apply_penalty(self, distance):\n\n self.cities_hit += 1 # Adds to the counter of cities without visiting a prime.\n\n if self.cities_hit > 10: # If Santa has visted more than 10 cities ...\n penalty_distance = (\n distance * 0.1) + distance # ...Applies the penalty for not showing up to a prime city...\n return penalty_distance # ...and returns the value based on the penalty\n else:\n return distance # Else return the distance.", "def num_shortest(roadmap, elevations, start, end):\n return algorithm(roadmap, elevations, start, end)[1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Place the vertex v at position, and apply transformation T. Return the grid points that are occupied by the piece.
def place(self, position, v, T):
    geo = (self.geo - self.geo[v]).dot(T)
    return position + geo
[ "def getPoint(self, u, v):\r\n P = matrix(np.ones((4, 1)))\r\n P.set(0, 0, self.__radius * cos(v) * u)\r\n P.set(1, 0, self.__radius * sin(v) * u)\r\n P.set(2, 0, 0.0)\r\n return P", "def project_vector(u, v):\n u_np = np.array([u.get_x(), u.get_y()])\n v_np = np.array([v.get_x(), v.get_y()])\n proj = (np.dot(u_np, v_np) / np.dot(v_np, v_np)) * v_np\n return Point(proj[0], proj[1])", "def translate(self, v: Union[Vector, Direction]) -> \"Point\":\n if isinstance(v, Direction):\n v = v.vector\n return Point(self.y + v.dy, self.x + v.dx)", "def translate(self, tr):\n c = self.c -self.a*tr[0] -self.b*tr[1]\n self.c =c\n self.pointN = self.pointN+tr\n self.point1 = self.point1+tr\n self.points +=tr", "def Translate(point, T):\r\n point = Matrix([[point[0]],\r\n [point[1]],\r\n [point[2]],\r\n [1]])\r\n T = Matrix(T)\r\n final_matrix = T @ point\r\n final_list = final_matrix.list\r\n point_x = final_list[0][0]\r\n point_y = final_list[1][0]\r\n point_z = final_list[2][0]\r\n return [point_x, point_y, point_z]", "def transform(self, V): \n raise NotImplementedError(\"Please Implement this method\")", "def move_vertex(self, p, v, x, y, z):\n self._move_vertex(p, v, x, y, z)", "def move_vertex(self,vert_idx,new_pos):\n before = marker_corners_norm\n after = before.copy()\n after[vert_idx] = new_pos\n transform = cv2.getPerspectiveTransform(after,before)\n for m in self.markers.values():\n m.uv_coords = cv2.perspectiveTransform(m.uv_coords,transform)", "def getPointAt(self,u,v,w):\r\n udeg = self.udegree\r\n uspan = sg.NurbsCurve.findSpan(u,udeg,self.uknots) \r\n Nu = sg.NurbsCurve.basisFunctions(uspan, u, udeg, self.uknots)\r\n\r\n vdeg = self.vdegree\r\n vspan = sg.NurbsCurve.findSpan(v,vdeg,self.vknots)\r\n Nv = sg.NurbsCurve.basisFunctions(vspan, v, vdeg, self.vknots)\r\n\r\n wdeg = self.wdegree\r\n wspan = sg.NurbsCurve.findSpan(w,wdeg,self.wknots)\r\n Nw = sg.NurbsCurve.basisFunctions(wspan, w, wdeg, self.wknots)\r\n \r\n tmp = [[None for i in xrange(vdeg+1)] for j in xrange(wdeg+1)]\r\n for i in xrange(0,wdeg+1):\r\n for j in xrange(0,vdeg+1):\r\n tmpVec = Vector4(0,0,0,0)\r\n for k in xrange(0,udeg+1):\r\n tmpVec += self.points[uspan-udeg+k][vspan-vdeg+j][wspan-wdeg+i] * Nu[k]\r\n tmp[i][j] = tmpVec\r\n \r\n tmp2 = [None for i in xrange(wdeg+1)]\r\n for i in xrange(0,wdeg+1):\r\n tmpVec = Vector4(0,0,0,0)\r\n for j in xrange(0,vdeg+1):\r\n tmpVec += tmp[i][j] * Nv[j]\r\n tmp2[i] = tmpVec\r\n \r\n res = Vector4(0,0,0,0)\r\n for i in xrange(0,wdeg+1):\r\n res += tmp2[i] * Nw[i]\r\n \r\n if res.w != 0:\r\n return res.project()\r\n else:\r\n return Vector3(res.x,res.y,res.z)", "def transform_ip(self, H): # or update()\n self.vh = H @ self.vertices.T\n self.vh = self.vh.T \n self.va = self.vh[:,:2]", "def _vp_move_grid_to_current_stage_position(self):\n x, y = self.stage.get_xy()\n self.gm[self.selected_grid].centre_sx_sy = [x, y]\n self.gm[self.selected_grid].update_tile_positions()\n self.gm.magc_roi_mode = False\n self.gm.update_source_ROIs_from_grids()\n self.vp_draw()", "def project(p, v, dim):\n p = normalize(p, dim)\n vert = torch.sum(p * v, dim=dim, keepdim=True) * p\n return v - vert", "def project(v, w):\r\n projection_length = dot(v,w)\r\n return scalar_multiply(projection_length, w)", "def vector_trans(self, v, T, V0):\n v = np.array(v)\n newv = np.add(v[0:2].dot(T), V0)\n self.log.debug(\"Transformation of vector {}, with transformation matrix {} nad V0 {}, to: {}\".format(v, T, V0, newv))\n return newv", "def translate_points(self):\n self.x += self.trans_flag * self.domain.L\n 
self.points += self.x - self.sphere.x", "def __call__(self, t) -> 'Point':\n\t\treturn (self.o + self.d * t).view(Point)", "def _vp_reposition_grid(self, shift_vector):\n dx, dy = shift_vector\n old_grid_origin_dx, old_grid_origin_dy = (\n self.gm[self.selected_grid].origin_dx_dy)\n new_grid_origin_dx = old_grid_origin_dx + dx / self.cs.vp_scale\n new_grid_origin_dy = old_grid_origin_dy + dy / self.cs.vp_scale\n # Set new grid origin and redraw.\n self.gm[self.selected_grid].origin_sx_sy = (\n self.cs.convert_d_to_s((new_grid_origin_dx, new_grid_origin_dy)))\n self.vp_draw()", "def eval_2pts(self, vector, t):\n if t < 0 or t > 1:\n raise Exception(\"Cannot Eval \", e, \" with t=\", t, \": t Should Satisfy 0<=t<=1.\")\n else:\n return Vector((1-t)*self.x + t*vector.x, (1-t)*self.y + t*vector.y, (1-t)*self.z + t*vector.z)", "def transform(self, matrix : np.matrix) -> 'Pointcloud':\n # Note that this method is inefficient, it can probably be done\n # in-place with some numpy magic\n self._ensure_cwipc()\n assert self.cwipc\n pcpoints = self.cwipc.get_points()\n points = []\n colort = []\n for p in pcpoints:\n points.append([p.x, p.y, p.z]) \n colort.append((p.r, p.g, p.b, p.tile))\n npPoints = np.matrix(points)\n submatrix = matrix[:3, :3]\n translation = matrix[:3, 3]\n npPoints = (submatrix * npPoints.T).T\n npPoints = npPoints + translation\n assert len(npPoints) == len(points)\n newPoints = []\n for i in range(len(points)):\n newPoint = tuple(npPoints[i].tolist()[0]) + colort[i]\n newPoints.append(newPoint)\n return self.__class__.from_points(newPoints)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate all nondegenerate placements, with one of the vertices placed at (0,0). Return the placements as [ (v, T) ], where v is the vertex to be placed at (0,0), and T is the 2x2 transformation matrix that places the piece according to self.geo[v] + T.dot(self.geo - self.geo[v])
def findNondegeneratePlacements( self): # Rotate counterclockwise by 90 degrees around the v'th vertex. r90 = np.array( [ [0,1], [-1,0] ], dtype=int) # Flip the piece along the vertical axis through the v'th vertex. fv = np.array( [ [1,0], [0,-1] ], dtype=int) self.placements = [] uniques = set() # Unique placements generated so far identity = np.array( [ [1,0], [0,1] ], dtype=int) T = identity[:,:] for i in xrange(self.nVertices): geo = self.geo[:,:] geo -= geo[i] # Place i'th vertex at (0,0) for r in xrange(4): T = T.dot( r90) for f in xrange(2): T = T.dot( fv) pk = placementKey( geo.dot(T)) if (not pk in uniques): uniques.add( pk) self.placements.append( (i, T)) # After four rotations and two flips, we should be back to # the original position. assert( np.array_equal( T, identity)) return self.placements
[ "def generate_all_locations(grid, shape):", "def test_create_new_placements(self):\n subv = SimpleMachineVertex(None, \"\")\n pl = Placement(subv, 0, 0, 1)\n Placements([pl])", "def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells", "def generate_zero(self):\n # generate map 0 - ocean, 1 - land\n # number of continents / count of start points\n continents = random.randint(self.min_continents, self.max_continents)\n self.params['continents'] = continents\n # size of land by percents - %\n size_land = random.randint(self.min_land, self.max_land)\n self.params['size_land'] = size_land\n # square of map - math → width * height = all points of map\n all_size = self.size * self.size\n # square of land, real, points\n size_land = round((size_land / 100.0) * all_size)\n # square of ocean, points\n size_ocean = all_size - size_land\n # squares of all continents\n sq_cont = {}\n # for devision land for continents\n about_sq = round(size_land / float(continents))\n min_sq = size_land\n # devision land for continents\n for continent in xrange(1, continents+1):\n sq_cont[continent] = about_sq\n min_sq = min_sq - sq_cont[continent]\n # start generate continents\n for continent in xrange(1, continents + 1):\n xy_ok = False\n # get start coordinates\n while not xy_ok:\n start_x = random.randint(0, self.size-1)\n start_y = random.randint(0, self.size-1)\n # if water - ok\n if self.maps[0][(start_x, start_y)] == 0:\n self.maps[0][(start_x, start_y)] = 1\n xy_ok = True\n\n # for increase continent, decrease free points of continent\n n_land = sq_cont[continent] - 1\n iterations = 0\n max_iter = n_land\n coord_cont = [(start_x,start_y)]\n while n_land > 0:\n # get random target from posible coordinates\n target = coord_cont[random.randint(0, len(coord_cont)-1 )]\n # get square 3x3 around target\n square_target = self.maps.get_round_xy_land(target, self.size)\n # get random point from 3x3\n x,y = square_target[random.randint(0, len(square_target)-1 )]\n # if water then will be land\n if self.maps[0][(x,y)] == 0:\n self.maps[0][(x,y)] = 1\n coord_cont.append((x,y))\n n_land -= 1\n\n for sea in xrange(random.randint(continents, continents * 2)):\n xy_ok = False\n # get start coordinates\n while not xy_ok:\n start_x = random.randint(0, self.size-1)\n start_y = random.randint(0, self.size-1)\n # if water - ok\n if self.maps[0][(start_x, start_y)] == 1:\n self.maps[0][(start_x, start_y)] = 0\n for i in xrange(random.randint(1,6)):\n square_target = self.maps.get_round_xy_land((start_x, start_y), self.size)\n x,y = square_target[random.randint(0, len(square_target)-1 )]\n self.maps[0][(x, y)] = 0\n xy_ok = True", "def get_neighbours(self) -> Generator['Position', None, None]:\n for dc in range(-1, 2):\n for dy in range(-1, 2):\n if dc != 0 or dy != 0:\n p = self + Vector2(dc, dy)\n if p.is_valid():\n yield p", "def place_allowed_tower_sites():\n self.coordinates__tower_sites = []\n for tk in xrange(self.N_tower_kinds):\n #Each kind of tower will have the correct number of sites placed\n \n coords = []\n while len(coords)<self.N_tower_sites[tk]:\n 
x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y) \n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords.append(p)\n self.coordinates__tower_sites.append(coords)", "def clear_sets_of_points(self):\r\n for place in self: place.set_of_points = np.ones((0, 3))", "def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in range(self._map_size)] for y in range(self._map_size)]\n\n center_x = self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid", "def test_get_placements(self):\n subv = list()\n for i in range(5):\n subv.append(SimpleMachineVertex(None, \"\"))\n\n pl = list()\n for i in range(4):\n pl.append(Placement(subv[i], 0, 0, i))\n\n pls = Placements(pl)\n container = pls.placements\n for i in range(4):\n self.assertIn(pl[i], container)", "def get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island", "def iter_grid_tiles(self):\n all_points = self.grid[0].union(self.grid[1], self.grid[2], {self.position})\n min_x = min(p.x for p in all_points)\n min_y = min(p.y for p in all_points)\n\n if min_x < 0:\n xoffset = -min_x\n elif min_x == 0:\n xoffset = 0\n elif min_x > 0:\n xoffset = min_x\n if min_y < 0:\n yoffset = -min_y\n elif min_y == 0:\n yoffset = 0\n elif min_y > 0:\n yoffset = min_y\n origin = Point(0 + xoffset, 0 + yoffset)\n position = Point(self.position.x + xoffset, self.position.y + yoffset)\n for tile_type in (0, 1, 2):\n for point in self.grid[tile_type]:\n newpoint = Point(point.x + xoffset, point.y + yoffset)\n if newpoint not in (origin, position):\n yield newpoint.x, newpoint.y, tile_type\n yield origin.x, origin.y , 4\n yield position.x, position.y, 3", "def generate(self):\n coordslist = []\n allpositions = []\n\n aiwords = self.choosewords()\n\n while self.aiboats != []: #loops until all ships have been placed\n row, column = randrange(1,10), randrange(1,10)\n orientation = bool(getrandbits(1)) #orientation is either True or False and chosen at random\n aiships = []\n currentpositions = []\n \n for i in range(self.aiboats[0]):\n if orientation and row + self.aiboats[0] < 10:\n currentpositions.append(str(row + i) + str(column))\n elif not orientation and column + self.aiboats[0] < 10:\n currentpositions.append(str(row) + str(column + i))\n\n if not set(allpositions).intersection(set(currentpositions)):\n #intersection() checks if any item from set 2 is in set 1. 
if that is not the case it returns False\n for i, position in enumerate(currentpositions):\n allpositions.append(position)\n aiships.append(position + aiwords[0][i])\n\n if aiships != []:\n for pos in aiships:\n coordslist.append(pos)\n self.aiboats.remove(self.aiboats[0])\n aiwords.remove(aiwords[0])\n\n return coordslist", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def __create_village(self):\n # find suitable random start point for village\n start_x = random.randint(0, self.GRID_X-1)\n start_y = random.randint(0, self.GRID_Y-1)\n if self.map[start_x][start_y][\"river\"]:\n self.__create_village()\n return\n while self.map[start_x][start_y][\"castle\"]:\n self.__create_village()\n return\n\n # generate points of village\n village_points = []\n for i in range(self.VILLAGE_SIZE):\n if i == 0:\n village_points.append((start_x, start_y))\n else:\n # get neighbors of current points and pick random suitable\n neighbors = utils.get_neighbors(village_points,\n (self.GRID_X, self.GRID_Y))\n point = neighbors[random.randint(0, len(neighbors)-1)]\n while self.map[point[0]][point[1]][\"river\"]:\n point = neighbors[random.randint(0, len(neighbors)-1)]\n neighbors.remove(point)\n while self.map[point[0]][point[1]][\"castle\"]:\n point = neighbors[random.randint(0, len(neighbors)-1)]\n neighbors.remove(point)\n village_points.append(point)\n\n # put village points on map\n for point in village_points:\n self.map[point[0]][point[1]][\"village\"] = True", "def placeable(state):\n players = defaultdict(set)\n for coordinate, value in state.grid.items():\n player, _ = value\n for n in neighbours(coordinate):\n players[player].add(n)\n # All neighbours to any tile placed by current player...\n coordinates = players[state.player()]\n # ...except where the opponent is neighbour...\n for p in players:\n if p != state.player():\n coordinates.difference_update(players[p])\n # ...and you cannot place on top of another tile.\n coordinates.difference_update(state.grid.keys())\n\n return coordinates", "def __pos_mines(self):\n mines_pos_1d = np.random.choice(self.n_grid ** 2, self.n_mines, replace=False)\n mines_pos = list(map(lambda x: divmod(x, self.n_grid), mines_pos_1d))\n for r, c in mines_pos:\n self._mine_map.curr[r][c] = -1\n for di, dj in self.neighbors:\n i, j = r + di, c + dj\n if i < 0 or i >= self.n_grid or j < 0 or j >= self.n_grid:\n continue\n elif self._mine_map.curr[i][j] == -1:\n continue\n else:\n # adding one adjacent bomb\n self._mine_map.curr[i][j] += 1", "def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles", "def generate_map_coords():\n zcoord = 0\n tile = query_tiles(Tile.nw_corner.name)\n yield db.MapCoord(xcoord=-1, ycoord=1, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.n_wall.name)\n yield db.MapCoord(xcoord=0, ycoord=1, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.n_wall.name)\n yield db.MapCoord(xcoord=1, ycoord=1, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.ne_corner.name)\n yield db.MapCoord(xcoord=2, ycoord=1, zcoord=zcoord, tile_id=tile.id)\n tile = 
query_tiles(Tile.w_wall.name)\n yield db.MapCoord(xcoord=-1, ycoord=0, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.ground.name)\n yield db.MapCoord(xcoord=0, ycoord=0, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.ground.name)\n yield db.MapCoord(xcoord=1, ycoord=0, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.e_wall.name)\n yield db.MapCoord(xcoord=2, ycoord=0, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.w_wall.name)\n yield db.MapCoord(xcoord=-1, ycoord=-1, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.obstacle.name)\n yield db.MapCoord(xcoord=0, ycoord=-1, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.ground.name)\n yield db.MapCoord(xcoord=1, ycoord=-1, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.e_wall.name)\n yield db.MapCoord(xcoord=2, ycoord=-1, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.sw_corner.name)\n yield db.MapCoord(xcoord=-1, ycoord=-2, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.s_wall.name)\n yield db.MapCoord(xcoord=0, ycoord=-2, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.s_wall.name)\n yield db.MapCoord(xcoord=1, ycoord=-2, zcoord=zcoord, tile_id=tile.id)\n tile = query_tiles(Tile.se_corner.name)\n yield db.MapCoord(xcoord=2, ycoord=-2, zcoord=zcoord, tile_id=tile.id)", "def get_adjecent_tiles(self):\n tiles = []\n \n # north:\n if self.valid_pos(self.old[0], self.old[1]-1):\n tiles.append(self.get_tile(\"north\", self.old[0], self.old[1]-1))\n else:\n tiles.append([\"north\", \"wall\", \"safe\", \"safe\"])\n # south:\n if self.valid_pos(self.old[0], self.old[1]+1):\n tiles.append(self.get_tile(\"south\", self.old[0], self.old[1]+1)) \n else:\n tiles.append([\"south\", \"wall\", \"safe\", \"safe\"]) \n # west:\n if self.valid_pos(self.old[0]-1, self.old[1]):\n tiles.append(self.get_tile(\"west\", self.old[0]-1, self.old[1]))\n else:\n tiles.append([\"west\", \"wall\", \"safe\", \"safe\"])\n # east:\n if self.valid_pos(self.old[0]+1, self.old[1]):\n tiles.append(self.get_tile(\"east\", self.old[0]+1, self.old[1]))\n else:\n tiles.append([\"east\", \"wall\", \"safe\", \"safe\"])\n \n return tiles" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count unoccupied neighbors of a point.
def countFreeNeighbors( p, board, occupation): n = 0 for m in [0, 1]: for d in [-1, 1]: pn = [p[0], p[1]] pn[m] += d j = board.grids.get( tuple(pn), None) if (j is None): continue # Not a board point if (occupation.has_key( j)): continue # Occupied n += 1 return n
[ "def count_neighbors(self, row, col):\r\n neighbors = 0\r\n for i in range(-1, 2):\r\n for j in range(-1, 2):\r\n try:\r\n val = self.grid[row + i][col + j]\r\n if val and not (i == 0 and j == 0):\r\n neighbors += 1\r\n except IndexError:\r\n pass\r\n return neighbors", "def countNeighbours(image,x,y,d=1):\n return sum([image[y+dy][x+dx] for dx in range(-d,d+1) for dy in range(-d,d+1) if not(dx==dy==0)])", "def count_neighbours(self, x, y, stop_at=8):\n possible_locations = [\n (x - 1, y - 1),\n (x, y - 1),\n (x + 1, y - 1),\n (x - 1, y),\n (x + 1, y),\n (x - 1, y + 1),\n (x, y + 1),\n (x + 1, y + 1),\n ]\n\n count = 0\n for x, y in possible_locations:\n if self.seat_occupied(x, y):\n count += 1\n if count >= stop_at:\n break\n return count", "def n_neighbors(self,n):\n return sum(1 for x in self.hex.get_neighbors_ring(n) if x is not None and x.is_occupied == 1)", "def n_neighbors_alive(self, row, col):\n return len([cell for cell in self.graph.neighbors(row, col) if cell.is_alive])", "def count_neighbors(self, x, y):\n # IMPLEMENT ME\n # HINT: You do not have to use a for-loop for this method; just\n # if-statements will suffice. Also, you do not need to indent further\n # than two levels further than this comment.\n neighbours = 0\n if x > 0 and y > 0:\n if self.board[x-1][y-1] == \"x\":\n neighbours += 1\n if x > 0:\n if self.board[x-1][y] == \"x\":\n neighbours += 1\n if x > 0 and y < self.width - 1:\n if self.board[x-1][y+1] == \"x\":\n neighbours += 1\n if y > 0:\n if self.board[x][y-1] == \"x\":\n neighbours += 1\n if y < self.width - 1:\n if self.board[x][y+1] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y > 0:\n if self.board[x+1][y-1] == \"x\":\n neighbours += 1\n if x < self.height - 1:\n if self.board[x+1][y] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y < self.width - 1:\n if self.board[x+1][y+1] == \"x\":\n neighbours += 1\n return neighbours", "def count_neighbor(self, current_pos):\n count = 0\n x, y = current_pos\n neighbors = []\n if x - 1 >= 0:\n neighbors.append((x - 1, y))\n if y - 1 >= 0:\n neighbors.append((x - 1, y - 1))\n if y + 1 < self.state.shape[1]:\n neighbors.append((x - 1, y + 1))\n \n if x + 1 < self.state.shape[0]:\n neighbors.append((x + 1, y))\n if y - 1 >= 0:\n neighbors.append((x + 1, y - 1))\n if y + 1 < self.state.shape[1]:\n neighbors.append((x + 1, y + 1))\n\n if y - 1 >= 0:\n neighbors.append((x, y - 1))\n \n if y + 1 < self.state.shape[1]:\n neighbors.append((x, y + 1))\n\n for pos in neighbors:\n if self.state[pos] == 1:\n count += 1\n\n return count", "def neighbour_count(self, x: int, y: int, direction: int):\n result = 0\n\n x_offset, y_offset = offsets_for_direction(direction)\n x += x_offset\n y += y_offset\n while self.is_populated(x, y):\n result += 1\n x += x_offset\n y += y_offset\n\n return result", "def count_alive_neighbors(grid, x, y):\n height = len(grid)\n width = len(grid[0])\n alive_count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n neighbor_x = x + i\n neighbor_y = y + j\n if i == 0 and j == 0:\n continue\n elif neighbor_x < 0 or neighbor_y < 0 or neighbor_y >= height or neighbor_x >= width:\n # Edges are considered alive. 
Makes map more likely to appear naturally closed.\n alive_count += 1\n elif grid[neighbor_y][neighbor_x] == 1:\n alive_count += 1\n return alive_count", "def count_active_neighbors(active: set, pt: tuple) -> int:\n return [tuple(pt[idx] + offset[idx] for idx in range(len(pt))) in active for offset in itertools.product((-1, 0, 1), repeat=len(pt))].count(True)", "def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]", "def neighbors(board: Board, x: int, y: int) -> int:\n n = 0\n\n def border_check(x_, y_):\n \"\"\"Helper for determining whether given x_ and y_ are valid\"\"\"\n return 0 <= x_ < board.width and 0 <= y_ < board.height\n\n # for each offset in [-1; 1]\n for i in range(-1, 2):\n for j in range(-1, 2):\n if i != 0 or j != 0:\n if border_check(x + i, y + j):\n n += 1 if board[x+i][y+j] == Cell.ALIVE else 0\n return n", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def get_neighbours_sum(self, x, y, current_point) -> int:\n return np.sum(self.grid_array[x-1:x+2, y-1:y+2]) - current_point", "def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines", "def count_land_neighbours(self):\n\t\tglobal neibs\n\t\tneibs = np.zeros((hh,wh),int)\n\t\t\n\t\t# Calculate the number of adjacent grids which are lands\n\t\tfor x in range(1,h+1): \n\t\t\tfor y in range(1,w+1):\n\t\t\t\tneibs[x,y] = lscape[x-1,y] \\\n\t\t\t\t\t+ lscape[x+1,y] \\\n\t\t\t\t\t+ lscape[x,y-1] \\\n\t\t\t\t\t+ lscape[x,y+1]", "def countNeighbors(row, col, A):\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if x == 0 and y == 0:\n count += 0\n else:\n if A[row + x][col + y] == 1:\n count += 1\n\n return count", "def sum_neighbours(d,x,y):\n return sum(d.get((i,j),0) for i in (x-1, x, x+1)\n for j in (y-1, y, y+1)\n if not (i == x and j == y))", "def num_neighbors(self):\n return self._num_neighbors" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find unoccupied positions on the board.
def findUnoccupied( board, occupation): return [ j for j in xrange(len(board.positions)) if not occupation.has_key(j) ]
[ "def available_positions(board):\r\n positions = []\r\n for i in range(board.board.num_cols()):\r\n for j in range(board.board.num_rows()):\r\n if board.board[i, j] is None:\r\n positions.append((i, j))\r\n return positions", "def unoccupied_squares(self):\n unoccupied = [\n Point(x, y) for x in range(1, self.dimension_max[0])\n for y in range(1, self.dimension_max[1])\n if Point(x, y) not in self.occupied_squares\n ]\n return unoccupied", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]", "def empty_neighborhood(self):\n self.neighborhood = self.model.grid.get_neighborhood(self.pos,moore=False, radius=1)\n self.empty_positions = self.neighborhood #[c for c in self.neighborhood if self.model.grid.is_cell_filled(c)]\n\n\n return self.empty_positions", "def find_empty(self):\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n\n for i in range(num_rows):\n for j in range(num_cols):\n if self.board[i][j] == 0:\n return (i, j)", "def get_unused_xy(self):\n # get a set of xys that could be connected to any existing ethernet\n xys_by_ethernet = set()\n for ethernet in self.ethernet_connected_chips:\n xys_by_ethernet.update(\n self.get_xys_by_ethernet(ethernet.x, ethernet.y))\n x = 0\n while (True):\n for y in range(self.height):\n xy = (x, y)\n if xy not in self._chips and xy not in xys_by_ethernet:\n return xy\n x += 1", "def adjacent_unoccupied_locations(self):\n return filter(self.is_location_unoccupied, self.adjacent_locations())", "def find_empty(self):\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n if self.board[i][j] == 0:\n return (i, j)\n \n return None", "def __find_empty_spot(self, board):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif board[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn", "def get_empty_squares(self, board: list[list[int]]) -> list[tuple[int, int]]:\n indexes: list[tuple[int, int]] = []\n for y in range(0, 9):\n for x in range(0, 9):\n if board[y][x] == 0:\n indexes.append((x, y))\n\n return indexes", "def locations_of_pieces_with_valid_moves(active_player, board):\n results = []\n for y in range(len(board)):\n for x in range(len(board[y])):\n if reason_piece_at_location_cant_move((x, y), active_player, board) is None:\n results.append((x, y))\n return results", "def available_positions(self):\n available_positions = []\n for i in range(self.positions_count):\n if self.board[i] == 0:\n available_positions.append(i+1)\n return available_positions", "def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions if self.grid[x][1] == \"---\"]\n else:\n return []", "def remove_filled_positions(self, positions, board):\n\n new_positions = []\n for p in positions:\n if board.check_move(p[0], p[1]):\n new_positions.append(p)\n return new_positions", "def free_positions(self):\r\n result_list = []\r\n for i in range(3):\r\n for j in range(3):\r\n if self.data[i][j] is None:\r\n result_list.append(tuple([i, j]))\r\n return result_list", "def unstable_locs(grid):\n return np.argwhere(grid >= 4)", "def get_positions(self):\r\n null_pos, black_pos, white_pos = set(), set(), set()\r\n for pos in BOARD_POSITIONS:\r\n if self.state[pos[0]][pos[1]] == 0:\r\n null_pos.add(pos)\r\n elif self.state[pos[0]][pos[1]] == 1:\r\n black_pos.add(pos)\r\n else:\r\n 
white_pos.add(pos)\r\n return null_pos, black_pos, white_pos", "def adjacent_unoccupied_safe_locations(self):\n return filter(self.is_location_safe, self.adjacent_unoccupied_locations())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether the model instance has already been selected in a related field (ManyToManyField, OneToOneField).
def available(self): fields = self._meta.get_fields() for field in fields: if isinstance(field, models.ManyToManyRel): attr = field.get_accessor_name() if getattr(self, attr).count() > 0: return False elif isinstance(field, models.OneToOneRel): attr = field.get_accessor_name() if getattr(self, attr, None): return False return True
[ "def relation_exists(cls, model):\n return bool(cls.get_related_field(model)\n or cls.get_reverse_related_field(model))", "def _is_join_model(model):\n return all([\n (field.primary_key or field.rel)\n for field in model._meta.fields\n ]) and len(model._meta.fields) > 1", "def has_field(self, field):\n return field in self.extra_fields", "def contains(self, obj):\n self._not_support_combined_queries(\"contains\")\n if self._fields is not None:\n raise TypeError(\n \"Cannot call QuerySet.contains() after .values() or .values_list().\"\n )\n try:\n if obj._meta.concrete_model != self.model._meta.concrete_model:\n return False\n except AttributeError:\n raise TypeError(\"'obj' must be a model instance.\")\n if obj.pk is None:\n raise ValueError(\"QuerySet.contains() cannot be used on unsaved objects.\")\n if self._result_cache is not None:\n return obj in self._result_cache\n return self.filter(pk=obj.pk).exists()", "def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False", "def _optimize_field_by_name(self, store: QueryOptimizerStore, model, selection, field_def) -> bool:\n name = self._get_name_from_field_dev(field_def)\n if not (model_field := self._get_model_field_from_name(model, name)):\n return False\n _logger.info('_optimize_field_by_name %r %r', name, model_field)\n if self._is_foreign_key_id(model_field, name):\n # ToDo: check if this works - i write resolvers for this\n store.only(name)\n return True\n if model_field.many_to_one or model_field.one_to_one:\n # ForeignKey or OneToOneField\n field_store = self._optimize_gql_selections(\n selection.selections,\n self._get_type(field_def),\n )\n store.select_related(name, field_store)\n return True\n if model_field.one_to_many or model_field.many_to_many:\n field_store = self._optimize_gql_selections(\n selection.selections,\n self._get_type(field_def),\n )\n if isinstance(model_field, ManyToOneRel):\n field_store.only(model_field.field.name)\n related_queryset = model_field.related_model.objects.all()\n _logger.info('_optimize_field_by_name many relation %r %r', model, name)\n store.prefetch_related(name, field_store, related_queryset)\n return True\n if not model_field.is_relation:\n store.only(name)\n return True\n return False", "def _filter_m2m(self, field):\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(field.model, field.rel.to, field):\n return field", "def is_to_many_relationship(field):\n # We need the protected *_base_field* variable here. 
Check if there is\n # a way to implement this function without accessing private or protected\n # attributes.\n #\n # Please note, that *field.item_type* is not sufficient.\n return isinstance(field, motorengine.ListField) \\\n and is_to_one_relationship(field._base_field)", "def is_to_many_relationship(field):\n if isinstance(field, mongoengine.ListField) \\\n and is_to_one_relationship(field.field):\n return True\n\n if isinstance(field, mongoengine.SortedListField)\\\n and is_to_one_relationship(field.field):\n return True\n return False", "def multiple_objs(self):\n return self._has_multiple_objects('o_pks')", "def is_m2m_set(self, int_model, model1, model2):\n for m2m in model1._meta.many_to_many:\n if m2m.rel.to == model2 and m2m.rel.through == int_model:\n return True\n for m2m in model2._meta.many_to_many:\n if m2m.rel.to == model1 and m2m.rel.through == int_model:\n return True\n return False", "def is_many_to_many_lookup(self):\r\n return self.is_lookup and not self.is_type('int')", "def exists(self, pk):\n return super(OfficialAccountManager, self).get_queryset().filter(pk=pk).exists()", "def exists(self, value=None):\n try:\n if not value:\n value = self.get()\n except AttributeError:\n # If the instance is deleted, the _pk attribute doesn't exist\n # anymore. So we catch the AttributeError to return False (this pk\n # field doesn't exist anymore) in this specific case\n return False\n else:\n return self.connection.sismember(self.collection_key, value)", "def has_item(self, item):\n return item in self.set", "def has_field(cls, field) -> bool:\n try:\n cls._meta.get_field(field)\n return True\n except models.FieldDoesNotExist:\n return False", "def _filter_related_m2m(self, rel):\n field = rel.field\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def include_related(request):\n query_param_value = request.GET.get(\"include_related\")\n return query_param_value in [\"true\", \"True\"]", "def is_saved(self):\n return self.pk is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Outputs the noise covariance matrix, R.
def getCovarianceNoiseMatrix(self): return np.dot ( self.getB().T, self.getB() )
[ "def get_cov(noise):\n num_coils = noise.shape[0]\n X = noise.reshape([num_coils, -1])\n X -= np.mean(X, axis=-1, keepdims=True)\n cov = np.matmul(X, X.T.conjugate())\n\n return cov", "def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))", "def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data", "def rand_cov():\n c = uniform(-1, 1)\n return [[uniform(0, 1), c], [c, uniform(0, 1)]]", "def _cov_sigma(self):\n D_K = tsa.duplication_matrix(self.neqs)\n D_Kinv = np.linalg.pinv(D_K)\n\n sigxsig = np.kron(self.sigma_u, self.sigma_u)\n return 2 * D_Kinv @ sigxsig @ D_Kinv.T", "def MVN_Denoise(Y, mvn_model, noise_std):\n return calc_weiner(mvn_model.cov, noise_std, Y, mvn_model.mean, False)\n # TODO: YOUR CODE HERE", "def get_cov_matrix_outputs(self):\n cov = numpy.diag(numpy.zeros(self.get_num_measured_outputs()))\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n cov[i,i] = o.get_covariance()\n i += 1\n return cov", "def covariance_matrix(self):\n covariance_matrix = np.zeros((self.N, self.N))\n for j in range(0, self.J):\n covariance_matrix += float(1)/self.J*(self.dataset[:, j].reshape(self.N, 1) - self.mean_data_vect)\\\n .dot((self.dataset[:, j].reshape(self.N, 1) - self.mean_data_vect).T)\n return covariance_matrix", "def MVN_Denoise(Y, mvn_model, noise_std):\n\n return calc_weiner(Y, mvn_model.cov, mvn_model.means, noise_std)", "def noise_matrix (stddev=0.1):;\n noise = Meq.GaussNoise(stddev=stddev);\n return Meq.Matrix22(\n Meq.ToComplex(noise,noise),Meq.ToComplex(noise,noise),\n Meq.ToComplex(noise,noise),Meq.ToComplex(noise,noise)\n );", "def _inverse_covariance_matrix(self, sigma_m_z=None):\n # here is the option for adding an additional covariance matrix term of the calibration and/or systematic\n # errors in the evolution of the Sne population\n if sigma_m_z is None or self._no_intrinsic_scatter:\n return self._cov_mag, self._inv_cov_mag_input\n # cov_mag_diag = self._cov_mag.diagonal()\n cov_mag = self._cov_mag + np.diag(np.ones(self.num_sne) * sigma_m_z**2)\n # np.fill_diagonal(self._cov_mag, cov_mag_diag + np.ones(self.num_sne) * sigma_m_z**2)\n invcov = np.linalg.inv(cov_mag)\n return cov_mag, invcov", "def get_covariance(self, session=None):\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * mt.sqrt(exp_var[:, mt.newaxis])\n exp_var_diff = mt.maximum(exp_var - self.noise_variance_, 0.0)\n cov = mt.dot(components_.T * exp_var_diff, components_)\n cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace\n cov.execute(session=session)\n return cov", "def process_noise_cov(self, dt=0.0):\n if dt not in self._Q_cache:\n Q = self.sv2 * dt * dt * eye_like(self.sv2, self._dimension)\n self._Q_cache[dt] = Q\n\n return self._Q_cache[dt]", "def ckm_covariance(self, N=1000):\n ckm = self.ckm_initial\n J = self.jacobian(**ckm)\n cov = 
self.obs_covariance(N=N)\n pred_sm = self.np_predictions_nockm(w=None, **ckm)\n cov = cov / np.outer(pred_sm, pred_sm)\n iJ = np.linalg.inv(J)\n return iJ @ cov @ iJ.T", "def cov_matrix(self):\n r = self.query_gaia_object()\n # empty matrix to build the covariance matrix\n c = np.empty((6, 6), dtype=float)\n c[0, 0] = r['ra_error']*r['ra_error']\n c[0, 1] = c[1, 0] = r['ra_error']*r['dec_error']*r['ra_dec_corr']\n c[0, 2] = c[2, 0] = r['ra_error']*r['parallax_error']*r['ra_parallax_corr']\n c[0, 3] = c[3, 0] = r['ra_error']*r['pmra_error']*r['ra_pmra_corr']\n c[0, 4] = c[4, 0] = r['ra_error']*r['pmdec_error']*r['ra_pmdec_corr']\n c[0, 5] = c[5, 0] = 0\n c[1, 1] = r['dec_error']*r['dec_error']\n c[1, 2] = c[2, 1] = r['dec_error']*r['parallax_error']*r['dec_parallax_corr']\n c[1, 3] = c[3, 1] = r['dec_error']*r['pmra_error']*r['dec_pmra_corr']\n c[1, 4] = c[4, 1] = r['dec_error']*r['pmdec_error']*r['dec_pmdec_corr']\n c[1, 5] = c[5, 1] = 0\n c[2, 2] = r['parallax_error']*r['parallax_error']\n c[2, 3] = c[3, 2] = r['parallax_error']*r['pmra_error']*r['parallax_pmra_corr']\n c[2, 4] = c[4, 2] = r['parallax_error']*r['pmdec_error']*r['parallax_pmdec_corr']\n c[2, 5] = c[5, 2] = 0\n c[3, 3] = r['pmra_error']*r['pmra_error']\n c[3, 4] = c[4, 3] = r['pmra_error']*r['pmdec_error']*r['pmra_pmdec_corr']\n c[3, 5] = c[5, 3] = 0\n c[4, 4] = r['pmdec_error']*r['pmdec_error']\n c[4, 5] = c[5, 4] = 0\n c[5, 5] = r['dr2_radial_velocity_error']*r['dr2_radial_velocity_error']\n return c", "def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov", "def covariance(X):\n\t\n\tN = X.shape[0]\n\tY = X - np.mean(X, 0)\n\tS = np.dot(Y.T, Y)/float(N-1)\n\treturn S", "def get_thermal_covariance(self):\n cov = []\n for var in self.noise_power_variance:\n cov.append(np.diag(var * np.ones(len(self.eta))))\n\n return cov", "def GSM_Denoise(Y, gsm_model, noise_std):\n d, N = Y.shape\n print(d, N)\n k = len(gsm_model.mix)\n opt_x = []\n C_i_y = np.zeros((N, k))\n var = np.identity(d) * (noise_std ** 2)\n\n for i in range(k):\n C_i_y[:, i] = np.log(gsm_model.mix[i]) + multivariate_normal.logpdf(Y.T, gsm_model.gmm.means[i][:],\n gsm_model.cov[i] + var, allow_singular=True)\n\n C_i_y = np.exp(normalize_log_likelihoods(C_i_y))\n for i in range(k):\n print((C_i_y[:, i] * calc_weiner(Y, gsm_model.cov[i], gsm_model.gmm.means[i][:], noise_std)).shape)\n opt_x.append(C_i_y[:, i] * calc_weiner(Y, gsm_model.cov[i], gsm_model.gmm.means[i][:], noise_std))\n\n opt_x = np.sum(opt_x, axis=0)\n\n print(np.array(opt_x).shape)\n return np.array(opt_x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if the object has a parent with the supplied name.
def has_parent(obj, parent_name): if obj.parent is None: return False if obj.parent.name is None: return False elif obj.parent.name == parent_name: return True else: return has_parent(obj.parent, parent_name)
[ "def has_parent(self):\n return self.get_parent() is not None", "def has_parent(obj):\n return obj and hasattr(obj, 'parent') and obj.parent", "def has_parent(self):\n return self.parent != None", "def has_parent(self):\n return self.parent is not None", "def has_parent(self):\n return self._parent_ is not None", "def has_parent(self, index: int) -> bool:\n return self.parent_index(index) >= 0", "def isChildExists(self, name):\n return self.getChild(name) != None", "def is_parent(self) -> bool:\n return AccountEntry.objects.filter(parent=self).exists()", "def contains_parent(self, pid):\n return pid in self._parent_ids", "def HasParent(self, type_):\r\n\r\n try:\r\n\r\n if self._parents[type_]:\r\n\r\n return True\r\n\r\n except KeyError:\r\n\r\n return False\r\n\r\n return False", "def parent_of(self, node):\n parent = node.parent\n while parent is not None:\n if self is parent:\n return True\n parent = parent.parent\n return False", "def has_parent_key(self):\n if self.is_root():\n return False\n try:\n self.parent_key()\n return True\n except ParseException:\n return False", "def _is_parent(a, b):\n # type: (PredContext, PredContext) -> bool\n while b and a is not b:\n b = getattr(b, 'parent', None)\n return a is b", "def has_child(self, name):\n return name in self.children", "def class_has_legit_device_parent(self, class_name):\n if not self.class_has_device_logic(class_name):\n return False\n has_objects_origins = self.get_has_objects_origin_names(\n class_name, include_root=True)\n if len(has_objects_origins) != 1:\n return False # referenced from multiple parents, so far sure no unique parent\n if has_objects_origins[0] == \"Root\":\n return True # Root by definition has device logic\n if not self.class_has_device_logic(has_objects_origins[0]):\n return False\n return True", "def is_parent_of(self, node):\n if node.depth <= self.depth:\n return False\n return node.parent(self.depth) == self", "def hasChild(self, name):\n res_children = self._resource.get(spc_func.CHILDREN_ATTR_NAME, list())\n return name in [res_child.get('name', None) for res_child in res_children]", "def has_name(self, name: str) -> bool:\n return name in self.child_tags", "def is_parent(self, item):\n if len(self.df.loc[self.df['parent_code']==item, :]): \n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will simulate PARALLEL_UNIVERSES_COUNT universes, then return the overall multiverse survival rate of the player.
def compute_player_score(): progress_bar = ProgressBar(label="Computing universes") survivals_count = 0 for i in range(PARALLEL_UNIVERSES_COUNT): if simulate_universe(): survivals_count += 1 progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT) progress_bar.end("\n\n") return survivals_count / PARALLEL_UNIVERSES_COUNT
[ "def simulate_universe():\n\n # untreated_survival is the probability to survive if not treated\n # this is an exact law of the universe, the player will not have this information\n untreated_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)\n\n trials: list[Trial] = []\n\n treated_survivals: dict[Trial, float] = {}\n\n for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):\n group_size = random.randint(MIN_GROUP_SIZE, MAX_GROUP_SIZE)\n\n # treated_survival is the probability to survive if treated\n # this is an exact law of the universe, the player will not have this information\n # therefore it is stored in a separate dict and not in the given-to-player Trial object\n treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)\n\n trial = Trial(group_size, untreated_survival, treated_survival)\n\n trials.append(trial)\n treated_survivals[trial] = treated_survival\n\n chosen_trial = playground.choose_trial(trials)\n\n if chosen_trial is None: # None means no treatment\n chosen_survival = untreated_survival\n else:\n chosen_survival = treated_survivals[chosen_trial]\n\n return random.uniform(0, 1) <= chosen_survival", "def calculate_survivors(self, planet=None):\n mage_life = 0\n if self.magos:\n\n # Calculate survivors\n mage_life = sum(self.race.rango_vid_mago) // 2\n survivors = min(self.vida / mage_life, len(self.magos))\n if planet:\n planet.magos = survivors\n\n # Kill off the dead and improve the survivors\n shuffle(self.magos)\n [self.magos.pop() for i in range(len(self.magos) - survivors)]\n for m in self.magos:\n m = mage(m.ataque + 5, m.vida + 10)\n\n if self.soldados:\n\n # Calculate survivors\n soldier_life = sum(self.race.rango_vid_soldado) // 2\n survivors = self.vida - len(self.magos)*mage_life\n survivors //= soldier_life\n if planet:\n planet.soldados = survivors\n\n # Kill off the dead and improve the survivors\n shuffle(self.soldados)\n [self.soldados.pop()\n for i in range(len(self.soldados) - survivors)]\n for s in self.soldados:\n s = soldier(s.ataque + 5, s.vida + 10)", "def get_returning_survivor_years(self):\n\n if not \"returning_survivor\" in self.survivor.keys():\n return []\n else:\n return self.survivor[\"returning_survivor\"]", "async def survival(self):\n path = \"/players/{}/survival_mastery\".format(self.id)\n resp = await self.client.requests.get(path=path)\n return Survival(resp)", "def calc_time_survived_mult(time_survived, game_length):\n return TIME_SURV_BASE + (time_survived / game_length)", "def update_returning_survivor_years(self, add_year=None):\n\n r = \"returning_survivor\"\n\n if not r in self.survivor.keys():\n self.survivor[r] = []\n\n if add_year is not None and not \"dead\" in self.survivor.keys():\n add_year = int(add_year)\n self.survivor[r].append(add_year)\n\n self.survivor[r] = list(set(self.survivor[r]))", "def compute_survival_rate(sMC_particle_ancestries):\n survival_rate = {}\n for _direction, _lst in sMC_particle_ancestries.items():\n rate = []\n num_starting_particles = len(_lst[0])\n for step in range(len(sMC_particle_ancestries[_direction])):\n rate.append(float(len(set(sMC_particle_ancestries[_direction][step]))) / num_starting_particles)\n survival_rate[_direction] = rate\n\n return survival_rate", "def lives(self, members):\n if self.Parameter('EraseNetwork'): self.reinitialize_network(members)\n #alivemembers1 = self.social_death(members, Spare=self.Parameter('DenunciationCost'))\n #AliveMembers = self.social_death(alivemembers1, 
Spare=self.Parameter('AlonePenalty'))\n AliveMembers = members # Version without death for now...\n if self.Parameter('SelectionPressure') == 0:\n return\n if not AliveMembers:\n return\n BestScore = max([i.score() for i in AliveMembers])\n MinScore = min([i.score() for i in AliveMembers])\n if BestScore == MinScore:\n return\n for indiv in members:\n indiv.LifePoints = int ( (indiv.score()-MinScore) * self.Parameter('SelectionPressure') / (BestScore - MinScore) )\n #print(indiv.LifePoints)\n #print(indiv.score() )\n #print(indiv.Patriotism)\n #print('\\n')", "def college_selectivity():", "def get_survivors(self, return_type=False, user_id=None, exclude=[], exclude_dead=False):\n\n query = {\"removed\": {\"$exists\": False}, \"settlement\": self.settlement[\"_id\"], \"_id\": {\"$nin\": exclude}}\n\n if exclude_dead:\n query[\"dead\"] = {\"$exists\": False}\n\n survivors = mdb.survivors.find(query).sort(\"name\")\n\n if self.User is not None:\n user_login = self.User.user[\"login\"]\n elif self.User is None and user_id is not None:\n self.User = User(user_id=user_id)\n user_login = self.User.user[\"login\"]\n else:\n self.User = None\n user_login = None\n\n current_user_is_settlement_creator = False\n if self.User is not None and self.User.user[\"_id\"] == self.settlement[\"created_by\"]:\n current_user_is_settlement_creator = True\n elif self.User is not None and \"admins\" in self.settlement.keys() and self.User.user[\"login\"] in self.settlement[\"admins\"]:\n current_user_is_settlement_creator = True\n\n if return_type == \"hunting_party\":\n hunting_party = []\n for survivor in survivors:\n if \"in_hunting_party\" in survivor.keys():\n hunting_party.append(survivor)\n return hunting_party\n\n if return_type == \"html_buttons\":\n output = \"\"\n for survivor in survivors:\n S = Survivor(survivor_id=survivor[\"_id\"])\n output += S.asset_link()\n return output\n\n if return_type == \"sex_count\":\n male = 0\n female = 0\n for s in survivors:\n if s[\"sex\"] == \"M\":\n male += 1\n elif s[\"sex\"] == \"F\":\n female += 1\n return \"%sM/%sF\" % (male,female)\n\n if return_type == \"html_campaign_summary\":\n # this is our big boy, full-featured controls for survivor management\n if survivors.count() == 0:\n return html.survivor.no_survivors_error\n\n groups = {\n 1: {\"name\": \"Departing\", \"survivors\": [], },\n 2: {\"name\": \"Favorite\", \"survivors\": [], },\n 3: {\"name\": \"Available\", \"survivors\": [], },\n 4: {\"name\": \"Skipping Next Hunt\", \"survivors\": [], },\n 5: {\"name\": \"Retired\", \"survivors\": [], },\n 6: {\"name\": \"The Dead\", \"survivors\": [], },\n }\n\n anonymous = []\n available = []\n for survivor in survivors:\n\n S = Survivor(survivor_id=survivor[\"_id\"], session_object=self.Session)\n annotation = \"\"\n user_owns_survivor = False\n disabled = \"disabled\"\n\n # stylize the survivor name\n savior_dict = {\n \"Lucernae\": \"Dream of the Lantern\",\n \"Caratosis\": \"Dream of the Beast\",\n \"Dormenatus\": \"Dream of the Crown\",\n }\n\n savior_square = \"\"\n for epithet in S.get_epithets():\n if epithet in [\"Lucernae\", \"Caratosis\", \"Dormenatus\"]:\n savior_square = '&ensp; <font id=\"%s\">&#x02588; <i>%s</i></font> <br/>' % (epithet, savior_dict[epithet])\n\n if survivor[\"email\"] == user_login or current_user_is_settlement_creator or \"public\" in survivor.keys():\n disabled = \"\"\n user_owns_survivor = True\n\n button_class = \"\"\n if user_owns_survivor:\n button_class = \"survivor\"\n\n if \"skip_next_hunt\" in S.survivor.keys():\n 
annotation = \"&ensp; <i>Skipping next hunt</i><br/>\"\n button_class = \"tan\"\n\n for t in [(\"retired\", \"retired_in\", \"tan\"),(\"dead\", \"died_in\", \"silver\")]:\n attrib, event, color = t\n if attrib in S.survivor.keys():\n if event in S.survivor.keys():\n annotation = \"&ensp; <i>%s LY %s</i><br/>\" % (event.replace(\"_\",\" \").capitalize(), S.survivor[event])\n else:\n annotation = \"&ensp; <i>%s</i><br/>\" % attrib.title()\n button_class = color\n\n\n s_id = S.survivor[\"_id\"]\n if not user_owns_survivor:\n s_id = None\n\n\n can_hunt = \"\"\n if \"dead\" in S.survivor.keys() or \"retired\" in S.survivor.keys() or \"skip_next_hunt\" in S.survivor.keys():\n can_hunt = \"disabled\"\n\n in_hunting_party = \"checked\"\n if \"in_hunting_party\" in S.survivor.keys():\n in_hunting_party = None\n can_hunt = \"\"\n\n is_favorite = \"hidden\"\n if \"favorite\" in S.survivor.keys():\n is_favorite = \"favorite\"\n\n avatar_img = \"\"\n if \"avatar\" in S.survivor.keys():\n avatar_img = S.get_avatar(\"html_campaign_summary\")\n\n survivor_html = html.survivor.campaign_asset.safe_substitute(\n avatar = avatar_img,\n survivor_id = s_id,\n settlement_id = self.settlement[\"_id\"],\n hunting_party_checked = in_hunting_party,\n settlement_name = self.settlement[\"name\"],\n b_class = button_class,\n able_to_hunt = can_hunt,\n returning = S.get_returning_survivor_status(\"html_badge\"),\n special_annotation = annotation,\n disabled = disabled,\n name = S.survivor[\"name\"],\n sex = S.get_sex(\"html\"),\n favorite = is_favorite,\n hunt_xp = S.survivor[\"hunt_xp\"],\n survival = S.survivor[\"survival\"],\n insanity = S.survivor[\"Insanity\"],\n courage = S.survivor[\"Courage\"],\n understanding = S.survivor[\"Understanding\"],\n savior = savior_square,\n )\n\n # finally, file our newly minted survivor in a group:\n if \"in_hunting_party\" in S.survivor.keys():\n groups[1][\"survivors\"].append(survivor_html)\n elif \"dead\" in S.survivor.keys():\n groups[6][\"survivors\"].append(survivor_html)\n elif \"retired\" in S.survivor.keys():\n groups[5][\"survivors\"].append(survivor_html)\n elif \"skip_next_hunt\" in S.survivor.keys():\n groups[4][\"survivors\"].append(survivor_html)\n elif \"favorite\" in S.survivor.keys():\n groups[2][\"survivors\"].append(survivor_html)\n else:\n if S.survivor[\"name\"] == \"Anonymous\":\n anonymous.append(survivor_html)\n else:\n available.append(survivor_html)\n\n # build the \"available\" group\n groups[3][\"survivors\"].extend(available)\n groups[3][\"survivors\"].extend(anonymous)\n\n #\n # Start assembling HTML here\n #\n output = html.settlement.campaign_summary_survivors_top\n\n for g in sorted(groups.keys()):\n group = groups[g]\n\n\n if group[\"name\"] in [\"The Dead\", \"Retired\"]:\n color = None\n if group[\"name\"] == \"The Dead\":\n color = \"grey\"\n elif group[\"name\"] == \"Retired\":\n color = \"tan\"\n the_dead = \"\\n\".join(group[\"survivors\"])\n g = group[\"name\"].replace(\" \",\"\").lower() + \"BlockGroup\"\n output += html.survivor.campaign_summary_hide_show.safe_substitute(color=color, group_id=g, heading=group[\"name\"], death_count = len(group[\"survivors\"]), dead_survivors=the_dead)\n else:\n output += \"<h4>%s (%s)</h4>\\n\" % (group[\"name\"], len(group[\"survivors\"]))\n\n\n for s in group[\"survivors\"]:\n output += \" %s\\n\" % s\n\n if group[\"name\"] == \"Departing\" and group[\"survivors\"] != []:\n bonuses = self.get_bonuses(\"departure_buff\")\n if bonuses != {}:\n output += '<hr class=\"invisible\"><span 
class=\"tiny_break\"></span>'\n for b in sorted(bonuses.keys()):\n output += \"<p><b>%s:</b> %s</p>\" % (b, bonuses[b])\n if bonuses != {}:\n output += '<span class=\"tiny_break\"/></span>'\n\n\n if group[\"name\"] == \"Departing\" and group[\"survivors\"] == []:\n output += \"<p>Use [::] to add survivors to the Departing group.</p>\"\n elif group[\"name\"] == \"Departing\" and group[\"survivors\"] != [] and current_user_is_settlement_creator:\n # settlement admin_controls; only show these if we've got\n # survivors and the current user is the admin\n\n output += html.settlement.hunting_party_macros.safe_substitute(settlement_id=self.settlement[\"_id\"])\n\n # current quarry controls\n quarry_options = []\n for q in self.get_game_asset(\"defeated_monsters\", return_type=\"options\"):\n if \"current_quarry\" in self.settlement.keys() and self.settlement[\"current_quarry\"] == q:\n quarry_options.append(\"<option selected>%s</option>\" % q)\n else:\n quarry_options.append(\"<option>%s</option>\" % q)\n output += html.settlement.current_quarry_select.safe_substitute(options=quarry_options, settlement_id=self.settlement[\"_id\"])\n\n # finally, controls to return the hunting party\n if self.User.get_preference(\"confirm_on_return\"):\n output += html.settlement.return_hunting_party_with_confirmation.safe_substitute(settlement_id=self.settlement[\"_id\"])\n else:\n output += html.settlement.return_hunting_party.safe_substitute(settlement_id=self.settlement[\"_id\"])\n\n output += html.settlement.hunting_party_macros_footer\n\n return output + html.settlement.campaign_summary_survivors_bot\n\n if return_type == \"chronological_order\":\n return mdb.survivors.find(query).sort(\"created_on\")\n\n return survivors", "def child_U(self):\n return math.sqrt(self.number_visits) * self.child_priors / (\n 1 + self.child_number_visits)", "def playersVehicleScoring(self):\n self.__playersDriverNum()\n return self.Rf2Scor.mVehicles[self.__playersDriverNum()]", "def __population_quality(self) -> float:\n population_identifier = np.zeros(shape=self.Dataset.size)\n subgroup_identifier = np.ones(shape=len(self.get_cover()))\n group = np.concatenate((population_identifier,\n subgroup_identifier))\n\n subgroup_times = self.Dataset.survival[self.get_cover()]\n subgroup_status = self.Dataset.status[self.get_cover()]\n\n time = np.concatenate((self.Dataset.survival, subgroup_times))\n status = np.concatenate((self.Dataset.status, subgroup_status))\n\n _, pvalue = sm.duration.survdiff(time, status, group)\n return 1 - pvalue", "def get_uniquenesses(self):\n # meets all of our expected criteria\n check_is_fitted(self, 'loadings_')\n communalities = self.get_communalities()\n communalities = communalities.copy()\n uniqueness = (1 - communalities)\n return uniqueness", "def survived(self,rates):\n # Juvenile or adult survival\n if self.age > 3:\n prob = rates[1]\n else:\n prob = rates[0]\n if np.random.binomial(1,prob):\n return True\n else:\n return False", "def gen_candidates_pvs_chaos(bureau_vote):\r\n global session_electorale\r\n global cheat_candidates\r\n global probabilite\r\n for candidate in session_electorale.candidates:\r\n if random.random() <= probabilite or candidate == session_electorale.current_president:\r\n if cheat_candidates[candidate]:\r\n if random.random() < 0.5:\r\n gen_candidate_wrong_pv(candidate, bureau_vote)\r\n continue\r\n copy_original_pv = PV(candidate, bureau_vote, bureau_vote.pVs[0].result.copy())\r\n bureau_vote.pVs.append(copy_original_pv)\r\n\r\n if 
session_electorale.current_president in session_electorale.candidates:\r\n for pv in bureau_vote.pVs:\r\n if pv.owner == session_electorale.current_president:\r\n copy_current_presi_pv = PV(session_electorale.organism, bureau_vote, pv.result.copy())\r\n bureau_vote.pVs.append(copy_current_presi_pv)\r\n break\r\n else:\r\n copy_original_pv = PV(session_electorale.organism, bureau_vote, bureau_vote.pVs[0].result.copy())\r\n bureau_vote.pVs.append(copy_original_pv)", "def impactfactor(venue):\r\n pass", "def vote_of_citizens():\n\tglobal vote_first_candidate\n\tglobal vote_second_candidate\n\tglobal blank_vote\n\t\n\tfor i in range(NUMBER_OF_CITIZENS):\n\t\tvote = random.randint(1,10)\n\n\t\tif(vote <= 3):\n\t\t\tvote_first_candidate+=1\n\t\telif(vote > 3 and vote <= 6):\n\t\t\tvote_second_candidate+=1\n\t\telse:\n\t\t\tblank_vote+=1", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simulates a universe and uses playground.choose_trial to make a decision. Returns True in case of survival in the simulated universe.
def simulate_universe(): # untreated_survival is the probability to survive if not treated # this is an exact law of the universe, the player will not have this information untreated_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL) trials: list[Trial] = [] treated_survivals: dict[Trial, float] = {} for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)): group_size = random.randint(MIN_GROUP_SIZE, MAX_GROUP_SIZE) # treated_survival is the probability to survive if treated # this is an exact law of the universe, the player will not have this information # therefore it is stored in a separate dict and not in the given-to-player Trial object treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL) trial = Trial(group_size, untreated_survival, treated_survival) trials.append(trial) treated_survivals[trial] = treated_survival chosen_trial = playground.choose_trial(trials) if chosen_trial is None: # None means no treatment chosen_survival = untreated_survival else: chosen_survival = treated_survivals[chosen_trial] return random.uniform(0, 1) <= chosen_survival
[ "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def run():\n\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(QLearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n # Now simulate it\n sim = Simulator(e, update_delay=0.00001) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def test_tournament_selection1():\n # Make a population where binary tournament_selection has an obvious\n # reproducible choice\n pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),\n Individual(np.array([1, 1, 1]), problem=MaxOnes())]\n # Assign a unique identifier to each individual\n pop[0].id = 0\n pop[1].id = 1\n\n # We first need to evaluate all the individuals so that\n # selection has fitnesses to compare\n pop = Individual.evaluate_population(pop)\n selected = ops.tournament_selection(pop)\n\n N = 1000\n p_thresh = 0.1\n observed_dist = statistical_helpers.collect_distribution(lambda: next(selected).id, samples=N)\n expected_dist = { pop[0].id: 0.25*N, pop[1].id: 0.75*N } \n print(f\"Observed: {observed_dist}\")\n print(f\"Expected: {expected_dist}\")\n assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh))", "def run():\n number_of_trials = 100\n print 'RUNNING SIMULATION FOR {} TRIALS...'.format(number_of_trials)\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, 
update_delay=.0001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=number_of_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n a.performace_report(number_of_trials)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0,\n display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=n_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print \"\\n\\n--------------------------\\n\" \\\n \"Destination reached in {:0.2f} steps ON AVERAGE \" \\\n \"with a STD of {:0.2f} steps and min/max of {:0.0f}/{:0.0f}\" \\\n .format(np.sum(a.trip_len) * 1.0 / len(a.trip_len), np.std(a.trip_len), np.min(a.trip_len), np.max(a.trip_len))\n print a.trip_len\n print \"Success rate for this simulation is {:0.2f}% ({:0.0f}/{})\\n\" \\\n \"--------------------------\".format(len(a.trip_len) * 100.0 / n_trials, len(a.trip_len), n_trials)\n\n print len(a.qtable)", "def run(self, simulation):", "def observe_trials(\n cls, trials: list[Trial], algo: BaseAlgorithm, rng: numpy.random.RandomState\n ):\n backward.algo_observe(\n algo,\n trials,\n [dict(objective=rng.normal()) for _ in trials],\n )", "def simulate(params,n_states,n_trials,v0=0.0,crit=\"S\",env = \"rich\", \\\n\tmod = \"zero\", k=1., rho=0.0, r_mag = 1, l_mag = -1, rnd_seed = None):\n\n\t##########################################################################\n\tdef calc_rho(state):\n\t\t\"\"\"\n\t\tcalculates rho for the current trial and returns\n\t\tthe updated state tracker for rho and respective betas\n\t\t\"\"\"\n\t\tif mod == \"constant\": \t\t# artificially set rho to specific value, e.g. rho = 0\n\t\t\tstate.rho[t] = rho\n\t\telif mod == \"value\":\t\t# state's critic value, max if crit = SA\n\t\t\tstate.rho[t] = np.max(state.V[t])*k\n\t\telif mod == \"beta\":\n\t\t\t\talph = state.V[0]\n\t\t\t\tbet = state.V[1]\n\t\t\t\tmean, var = beta_rv.stats(alph,bet,moments='mv')\n\t\t\t\tstate.mean = mean\n\t\t\t\t# assume rmag and lmag are same for both options\n\t\t\t\tinfer_val = r_mag*mean + l_mag*(1-mean) # in [-1,1], like rho\n\t\t\t\tstate.rho[t] = infer_val*k \n\t\telse:\n\t\t\terr = 'Invalid value given for arg mod. \\\"%s\\\" given' %mod\n\t\t\traise Exception(err)\n\n\t\tstate.beta_g[t] = np.max([0,beta*(1 + state.rho[t])])\n\t\tstate.beta_n[t] = np.max([0,beta*(1 - state.rho[t])])\n\t\treturn state\n\n\tdef generate_state():\n\t\t\"\"\"\n\t\tintialize state for learning according to specified environment type\n\n\t\tr_mag = 1 # get double s.t.\n\t\tl_mag = -1 # lose s.t.\n\t\t\"\"\"\n\t\tif env == \"high\":\n\t\t\tn_options = 1\n\t\t\tprobs = np.random.uniform(.5,1.)\n\t\telif env == \"low\":\t\n\t\t\tn_options = 1\t\t\t\t\t\t\t\n\t\t\tprobs = np.random.uniform(0.,.5)\n\t\telse:\n\t\t\terr = 'Invalid value given for arg env. 
%s given' %env\n\t\t\traise Exception(err)\n\t\tnew_state = OpAL(n_trials, crit, v0, n_options, probs, r_mag, l_mag)\n\t\treturn new_state\n\t##########################################################################\n\n\t# define parameters\n\talpha_c, alpha_a, beta = params\n\tstates = []\n\n\t# check if random seed provided\n\tif rnd_seed is not None:\n\t\trandom.seed(rnd_seed)\n\t\tnp.random.seed(rnd_seed)\n\n\t# let's do this thing\n\tfor s in np.arange(n_states):\n\n\t\t# generate new learning state\n\t\tstate = generate_state()\n\t\tfor t in range(n_trials):\n\t\t\tstate.idx = t\n\t\t\tstate = calc_rho(state)\t # calculate rho, beta_g, beta_n\n\t\t\tstate.policy_gamble()\t\t# pick an action and generate PE\n\t\t\tstate.critic(alpha_c)\t\t# update critic with PE\n\t\t\tstate.act_gamble(alpha_a)\t# update actors with PE\n\n\t\t# save state learning\n\t\tstates.append(state)\n\n\treturn states", "def test_tournament_selection2():\n # Make a population where binary tournament_selection has an obvious\n # reproducible choice\n pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),\n Individual(np.array([1, 1, 1]), problem=MaxOnes())]\n # Assign a unique identifier to each individual\n pop[0].id = 0\n pop[1].id = 1\n\n # We first need to evaluate all the individuals so that\n # selection has fitnesses to compare\n pop = Individual.evaluate_population(pop)\n selected = ops.tournament_selection(pop, select_worst=True)\n\n N = 1000\n p_thresh = 0.1\n observed_dist = statistical_helpers.collect_distribution(lambda: next(selected).id, samples=N)\n expected_dist = { pop[0].id: 0.75*N, pop[1].id: 0.25*N } \n print(f\"Observed: {observed_dist}\")\n print(f\"Expected: {expected_dist}\")\n assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh))", "def random_goals(agents,subgoals=3):\n\n possible_goals = predicates[2:5]+predicates[6:7]+predicates[8:9]\n possible_objects = list(objects)\n\n for agent in agents:\n agent_goals = []\n for _ in range(random.randint(1,subgoals)):\n agent_goals.append(random.choice(possible_goals))\n print(agent_goals)\n for j,agent_goal in enumerate(agent_goals):\n goal = agent_goal.split()\n new_goal = \"\"\n for i,part in enumerate(goal):\n if \"?\" in part:\n if \"cl\" in part:\n chosen = \"none\"\n while (\"(location \"+chosen+\")\" not in facts) and (\"(character \"+chosen+\")\" not in facts):\n chosen = random.choice(possible_objects)\n part = part.replace(\"?cl\",chosen)\n elif \"c\" in part:\n chosen = \"none\"\n while \"(character \"+chosen+\")\" not in facts:\n chosen = random.choice(possible_objects)\n part = part.replace(\"?c\",chosen)\n elif \"l\" in part:\n chosen = \"none\"\n while \"(location \"+chosen+\")\" not in facts:\n chosen = random.choice(possible_objects)\n part = part.replace(\"?l\",chosen)\n elif \"i\" in part:\n chosen = \"none\"\n while \"(item \"+chosen+\")\" not in facts:\n chosen = random.choice(possible_objects)\n part = part.replace(\"?i\",chosen)\n\n new_goal += \" \"+part\n agent_goals[j] = new_goal\n if len(agent_goals) > 1:\n goals.append(\"(and\"+\"\".join(agent_goals)+\")\")\n else:\n goals.append(agent_goals[0])\n print(goals)", "def test_simulation(a = rd.choice(list(Seats2018.keys())),d = Seats2018):\n\tSimulation = Electoral_Montecarlo.Complete_Simulation()\n\tassert np.abs(Simulation[a] - d[a])<10 #There's a little difference because of different method ripartition for the proportional part", "def schizophrenia():\n referee = Referee()\n player_x = players.QLearningPlayer('x', referee)\n 
player_o = players.QLearningPlayer('o', referee)\n\n player_o.Q = player_x.Q\n player_o.visited = player_x.visited\n\n print 'training the schizophrenic agent'\n print_results(referee, player_x, player_o, 10000, 5)\n\n print '\\ncreating a fresh opponent and letting it play as \\'x\\''\n new_x = players.QLearningPlayer('x', referee)\n new_o = player_o\n new_o.match_count = 0\n print_results(referee, new_x, new_o, 10000, 10)", "def main():\n test_unknown1 = test_unknown()\n test_unknown1.nonogram_solver()", "def Main():\n numberOfPopulation = 350\n numberOfDays = 60\n \n simulation = Simulation(Covid19(), numberOfPopulation, numberOfDays, \"Covid 19 Simulation\")\n simulation.run() \n simulation = Simulation(Ebola(), numberOfPopulation, numberOfDays, \"Ebola Simulation\")\n simulation.run()", "def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))", "def test(\n algorithm,\n capacity_hospital,\n number_steps,\n number_episodes,\n p_arr_prob,\n doctors,\n feature,\n rand_rewards = None,\n p_prob_test = None,\n gamma = 0.9,\n alpha = None,\n epsilon = 0.1,\n plot_type = \"hist\",\n title1 = \"\",\n title2 = \"\",\n earlyRewards = True,\n capacity_penalty = False,\n reward_evolution = False,\n naive_rewards = None):\n\n # an instance of the Hospital object (defined in hospital.py)\n hospital = Hospital(capacity_hospital, doctors, p_arr_prob)\n \n # function for the sarsa algorithm.\n # Q_weights - the weight matrix, 1 weight vector for each action in the simulation\n # total_reward_per_episode - a list with the reward for each episode\n # t_list - List of info for when each episode is terminated (if terminated)\n # --- if alpha = None then alpha is calculated as 1 / number_steps\n t_list, Q_weights, total_reward_per_episode = algorithm(\n hospital,\n feature,\n gamma,\n alpha,\n epsilon,\n number_episodes,\n number_steps,\n earlyRewards,\n capacity_penalty)\n \n \n # If you want to use different patient arrival probabilities for testing, a new hospital is created\n if p_prob_test is not None:\n hospital = Hospital(capacity_hospital, doctors, p_prob_test)\n\n if not(title1):\n title1 = (\"Early\" if earlyRewards else \"Late\")+\\\n \" rewards and \"+\\\n (\"a\" if capacity_penalty else \"no\")+\\\n \" capacity penalty\"\n \n # Simulate the learned policy.\n # props - a matrix for the relative distribution of patients in the queues.\n # rewards - a list with the reward acquired at each step in the simulation\n # cured - total number of cured patients during the simulation\n # time - total time waited by cured patients\n # cured_types - cured patients by types\n props, rewards, cured, time, cured_types, size = simulate(\n hospital,\n feature,\n Q_weights,\n steps = number_steps,\n plot = plot_type,\n title = title1,\n checkBefore = earlyRewards,\n cap_penalty = capacity_penalty)\n \n \n # A plot that shows the episodic reward evolution during the learning phase\n # this is also informative of how fast the algorithm is learning\n if reward_evolution:\n rewards_curve(total_reward_per_episode,\n number_episodes,\n title2,\n naive_rewards)\n \n \n # Extra information to be printed for the first figure\n print_extra_info(rewards, cured, number_steps, cured_types, sum(map(sum, time)), title1)\n \n return props, rewards, cured, time, cured_types, size" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Probability grouping of category variables
def probability_categorical(feature, label):
    assert feature.nunique()>2, 'feature category nums must be greater than 2.'
    t = pd.DataFrame({'feature':feature, 'label':label})
    cat = label.unique()
    cat = [(cat[i], cat[i+1]) for i in range(len(cat)-1)]
    prob = label.value_counts(1).to_dict()
    slope = [prob.get(i[0], 0)-prob.get(i[1], 0) for i in cat]
    slope_dict = t.feature.value_counts(1).to_dict()
    prob = t.groupby(['feature']).label.value_counts(1).to_dict()
    slope_dict = {i:{'category_rate':slope_dict[i],
                     'slope':[prob.get((i,j[0]), 0)-prob.get((i,j[1]), 0) for j in cat]} for i in slope_dict}
    for i in slope_dict:
        slope_dict[i]['slope_diff'] = sum([abs(slope[j]-slope_dict[i]['slope'][j]) for j in range(len(slope))])
    value1 = sorted([[[i], slope_dict[i]['slope_diff'], slope_dict[i]['category_rate']] for i in slope_dict], key=lambda x:x[1], reverse=1)
    distance = sorted([value1[i][1]-value1[i+1][1] for i in range(len(value1)-1)])
    std = pd.Series([i[1] for i in value1]).std()
    coupe = value1
    dis = distance[0]
    for k in distance:
        value = value1
        while 1:
            for i in range(len(value)-1):
                if value[i][1]-k<value[i+1][1]:
                    value[i+1][0] = value[i][0]+value[i+1][0]
                    value[i+1][1] = value[i][1]*value[i][2]/(value[i][2]+value[i+1][2])+value[i+1][1]*value[i+1][2]/(value[i][2]+value[i+1][2])
                    value[i+1][2] = value[i][2]+value[i+1][2]
                    value.remove(value[i])
                    break
            if i==len(value)-2:
                break
        if pd.Series([i[1] for i in value]).std()>std:
            coupe = value
            std = pd.Series([i[1] for i in value]).std()
            dis = k
    return {'group':{k:i for i,j in enumerate(coupe) for k in j[0]},
            'data':coupe, 'distance':dis,
            'distance_index':f'{distance.index(dis)+1}/{len(distance)}', 'std':std}
[ "def cat_conditional_probability(self):\n # get dummies and isolate salary series\n df = self.custom_preprocess(flip_salary_index=True, drop_cont=True, pivot_cat=True)\n salary_series = df['salary']\n df.drop(columns=['salary'], inplace=True)\n\n corr_frame = pd.DataFrame(index=list(df.keys()), columns=['P(>50K | is of category: _)'])\n\n # find probability for each dummy and store in a data frame\n for dummy in df.keys():\n # store multiplied columns in the dummy column.\n # if there is 1 in both columns the it will stay in the new Series\n pos_corr = (df[dummy] & salary_series).sum()\n total_of_dummy = df[dummy].sum()\n corr_frame['P(>50K | is of category: _)'][dummy] = pos_corr / total_of_dummy\n print(dummy, total_of_dummy, pos_corr)\n\n # plot results\n corr_frame['P(>50K | is of category: _)'].plot(kind='barh')\n plt.show()", "def univariate_categorical_viz(df, categorical_features):\n for feature in categorical_features:\n print(\"Distribution of the:'{}' categories\".format(feature))\n print(\"There are {} distinct categories\".format(len(df[feature].unique()))) \n print(\"Most common label is '{m}' with {p}%\"\n .format(m= df[feature].mode()[0],\n p=round(100*len(df.loc[df[feature] == df[feature].mode()[0]])/len(df))))\n categorical_univariate_plot(df,feature)", "def probability(df, key_list):\n pclass, sex = key_list\n filtered_df = df[(df.Sex == sex) & (df.Pclass == pclass)]\n return filtered_df['Survived'].mean()", "def categorical(pvals: np.ndarray) -> int:\n\n return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals)))", "def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages", "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]", "def calcProbability(question):\n # print(\"Classifying Question:\",question)\n question = NBFunctions.stopWordRemoval(question)\n wordList = word_tokenize(question)\n wordProbabilities = []\n for eachCategory in categoryList:\n if eachCategory == \"Logic\":\n probWordInCategory = NBFunctions.bayesWordProb(wordList, freqLogic, totalCntLogic, totalFeatures)\n elif eachCategory == \"Fraction\":\n probWordInCategory = NBFunctions.bayesWordProb(wordList, freqFraction, totalCntFraction, totalFeatures)\n elif eachCategory == \"Algebra\":\n probWordInCategory = NBFunctions.bayesWordProb(wordList, freqAlgebra, totalCntAlgebra, totalFeatures)\n elif eachCategory == \"Comparison\":\n probWordInCategory = NBFunctions.bayesWordProb(wordList, freqComparison, totalCntComparison, totalFeatures)\n elif eachCategory == \"Percentage\":\n probWordInCategory = NBFunctions.bayesWordProb(wordList, freqPercentage, totalCntPercentage, totalFeatures)\n elif eachCategory == \"Probability\":\n probWordInCategory = NBFunctions.bayesWordProb(wordList, freqProbability, totalCntProbability, totalFeatures)\n elif eachCategory==\"Gk\":\n probWordInCategory = NBFunctions.bayesWordProb(wordList, freqGk, totalCntGk, totalFeatures)\n wordProbabilities.append(probWordInCategory)\n\n probabilityMap = {}\n probabilityMap = NBFunctions.probProduct(categoryList, wordProbabilities, probabilityMap)\n maxProbCategory = max(probabilityMap, key=probabilityMap.get)\n return 
(maxProbCategory)", "def categorical_log_probs(self):\n\n cutpoints = tf.convert_to_tensor(self.cutpoints)\n loc = tf.convert_to_tensor(self.loc)\n num_cat = self._num_categories()\n\n # For the StoppingRatioLogistic, we have:\n # P(X = c; X >= c, cutpoints, loc) = sigmoid(cutpoints[c] - loc)\n # Given these conditional probabilities, we would like to retrieve\n # P(X = c; cutpoints, loc).\n # Let F(c) = P(X = c; X >= c, cutpoints, loc) and\n # G(c) = P(X = c; cutpoints, loc)\n\n # Conditional probabilities. These are log(F(k)) and log(1 - F(k))\n conditional_log_probs = tf.math.log_sigmoid(\n cutpoints - loc[..., tf.newaxis])\n conditional_log_probs_complement = generic.log1mexp(conditional_log_probs)\n\n # Note that F(0) = G(0).\n # G(1) = P(X = 1; cutpoints, loc) =\n # P(X = 1; X >= 1, cutpoints, loc) * P(X >= 1) = F(1) * (1 - G(0))\n # G(2) = P(X = 2; cutpoints, loc) =\n # P(X = 2; X >= 2, cutpoints, loc) * P(X >= 2) = F(2) * (1 - G(0) - G(1))\n # In general, G(k) = F(k) * (1 - \\sum_{k-1} G(i))\n\n # We rewrite this recurrence in terms of F(k)\n # G(1) = F(1) * (1 - G(0)) = F(1) * (1 - F(0))\n # G(2) = F(2) * (1 - G(0) - G(1)) = (1 - F(0) - F(1) * (1 - F(0))\n # = F(2) * (1 - F(0)) * (1 - F(1))\n # G(k) = F(k) * \\prod_{k-1} (1 - F(i))\n\n # log(F(k)) + log(\\prod (1 - F(i)))\n categorical_log_probs = conditional_log_probs + tf.math.cumsum(\n conditional_log_probs_complement[..., :(num_cat - 1)],\n axis=-1, exclusive=True)\n # Finally we need to handle the last category.\n return tf.concat([\n categorical_log_probs,\n tf.math.reduce_sum(\n conditional_log_probs_complement[\n ..., :num_cat], axis=-1, keepdims=True)], axis=-1)", "def learn_distributions(file_lists_by_category):\n ### TODO: Comment out the following line and write your code here\n #raise NotImplementedError\n #initialize spam/ham counters\n n_spam = len(file_lists_by_category[0])\n n_ham = len(file_lists_by_category[1])\n #\n log_probability_spam = get_log_probabilities(file_lists_by_category[0])\n log_probability_ham = get_log_probabilities(file_lists_by_category[1])\n return ([log_probability_spam, log_probability_ham], [np.log(n_spam/(n_spam+n_ham)), np.log(n_ham/(n_spam+n_ham))])", "def conditional_probability(data, attr, cp_table):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # number of instances beloning to each class\n nclass0 = cp_table.loc[0, class0].sum()\n nclass1 = cp_table.loc[0, class1].sum()\n total = nclass0 + nclass1\n # all probabilities include a laplace est of 1\n prior0 = (nclass0 + 1) / (total + 2)\n prior1 = (nclass1 + 1) / (total + 2)\n list0 = []\n list1 = []\n for index, row in cp_table.iterrows():\n numattr = len(attr.loc[index, 'vars'])\n numer0 = row[class0] + 1\n numer1 = row[class1] + 1\n denom0 = nclass0 + (1 * numattr)\n denom1 = nclass1 + (1 * numattr)\n cp0 = numer0 / denom0\n cp1 = numer1 / denom1\n list0.append(cp0)\n list1.append(cp1)\n # replacing columns in previous table with cond probs\n del cp_table[class0]\n del cp_table[class1]\n cp_table[class0] = list0\n cp_table[class1] = list1\n \n return cp_table, prior0, prior1", "def feature_prob(self, f, cat):\n if self.category_count(cat) == 0:\n return 0\n # The total number of times this feature appeared in this \n # category divided by the total number of items in this category\n pfc = self.feature_count(f, cat)\n pc = self.category_count(cat)\n return float(pfc)/pc", "def 
age_group_mixing():\n p = []\n for j in np.linspace(1,5,5):\n for k in np.linspace(1,5,5):\n if j == k:\n p.append(1)\n else:\n p.append(0.2**np.abs(j+1-k))\n p /= sum(p)\n return p", "def probability_given(data, class_label, feature_index):\n class_vectors = filter_on_class_label(data, class_label)\n return probability_of_feature(class_vectors, feature_index)", "def evaluate_probabilities(self, batches):\n total_batches = batches.batches_per_epoch()\n catprobs = []\n for batch in range(total_batches):\n X_batch, y_batch = batches.get_batch()\n feed_dict = {\n self.x: X_batch,\n self.y: y_batch,\n self.keep_prob: 1.0}\n fetch_dict = {\n \"catprobs\": self.categorical_probabilities}\n result = self.session.run(fetch_dict, feed_dict)\n catprobs.append(result[\"catprobs\"])\n catprobs = np.concatenate(catprobs)\n return catprobs", "def probabilities(train, attributes):\n import numpy as np\n\n if len(train) == 0:\n print('Input train dataset has problem!')\n exit(3)\n\n # this list contains probabilities of each feature that\n # shows p(feature i| y=j) for all features and classes.\n # format of list is:\n #[class 0{feature1: #,..., featureN: #},\n # ...,\n # class N{feature1: #,..., featureN: #}\n #]\n # initial list that contains n dictionaries for\n # each class\n probabilitiesOfFeatures = []\n for _class in range(len(attributes[6])):\n probabilitiesOfFeatures.append({})\n\n #this dictionary contains number of elements of each class\n numberOfEachClass = {}\n\n # For each feature calculate p(feature| y=i) for all features\n for observation in train:\n for index,feature in enumerate(observation[0:-1]):\n\n # count number of features given class\n if feature in probabilitiesOfFeatures[observation[len(observation)-1]]:\n probabilitiesOfFeatures[observation[len(observation)-1]][feature] += 1\n else:\n probabilitiesOfFeatures[observation[len(observation)-1]][feature] = 1\n\n # count number of elements of each class\n if observation[len(observation)-1] in numberOfEachClass:\n numberOfEachClass[observation[len(observation)-1]] += 1\n else:\n numberOfEachClass[observation[len(observation)-1]] = 1\n\n # divide x given y by size of y\n for index, classes in enumerate(probabilitiesOfFeatures):\n for feature in classes:\n probabilitiesOfFeatures[index][feature] /= numberOfEachClass[index]\n\n # return probabilities of features given classes\n return probabilitiesOfFeatures", "def _findClassProbabilities(training_labels):\r\n label_count_dict = Counter(training_labels)\r\n total_label_size = len(training_labels)\r\n \r\n for label, count in label_count_dict.iteritems():\r\n label_count_dict[label] = count / float(total_label_size)\r\n\r\n return label_count_dict", "def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def get_probs(self, *vars):\n freqs = 
self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)", "def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert time_offsets to gps timestamps and nanoseconds
def get_gps_timestamp(file, time_offset):
    reference_date = get_reference_datetime(file)
    absolute_date = get_absolute_datetime(reference_date, time_offset)
    timestamp, nanosecond = datetime_to_gpstimestamp_nanoseconds(absolute_date)

    return timestamp, nanosecond
[ "def normalize_time(timestamps):\n phases = (timestamps - timestamps[0]) / (timestamps[-1] - timestamps[0])\n return phases", "def datetimes(self):\n if not self.offsets.offsets.shape:\n offsets = self.offsets.offsets.reshape((1,))\n else:\n offsets = self.offsets.offsets\n if np.issubdtype(self.offsets.offsets.dtype, np.integer):\n result = np.array([str(self.epoch +\n timedelta(**{self.offsets.unit.unit: v}))\n for v in offsets])\n else:\n result = self.offsets.offsets\n return result", "def _create_time_info(nn_intervals: List[float]) -> List[float]:\n # Convert in seconds\n nni_tmstp = np.cumsum(nn_intervals) / 1000\n\n # Force to start at 0\n # return nni_tmstp - nni_tmstp[0]\n return nni_tmstp - nni_tmstp[0]", "def convert_time(rawtimes):\n\n # rawtimes is in units of CXC seconds, or seconds since 1998.0\n # Compute the Delta T between 1998.0 (CXC's Epoch) and 1970.0 (Unix Epoch)\n\n seconds_since_1998_0 = rawtimes[0]\n\n cxctime = dt.datetime(1998, 1, 1, 0, 0, 0)\n unixtime = dt.datetime(1970, 1, 1, 0, 0, 0)\n\n # Calculate the first offset from 1970.0, needed by matplotlib's plotdate\n # The below is equivalent (within a few tens of seconds) to the command\n # t0 = Chandra.Time.DateTime(times[0]).unix\n delta_time = (cxctime - unixtime).total_seconds() + seconds_since_1998_0\n\n plotdate_start = epoch2num(delta_time)\n\n # Now we use a relative offset from plotdate_start\n # the number 86,400 below is the number of seconds in a UTC day\n\n times = (np.asarray(rawtimes) - rawtimes[0]) / 86400. + plotdate_start\n\n return times", "def datetime_to_gpstimestamp_nanoseconds(date):\n timestamp = gpstime.utc_to_gps(calendar.timegm(date.utctimetuple()))\n nanosecond = date.microsecond * 1000\n\n return timestamp, nanosecond", "def _create_interpolation_time(time_nni: List[float], sampling_frequency: int = 7) -> List[float]:\n # time_nni = _create_time_info(nn_intervals)\n # Create timestamp for interpolation\n nni_interpolation_tmstp = np.arange(0, time_nni[-1], 1 / float(sampling_frequency))\n return nni_interpolation_tmstp", "def get_times(self, times):\n times = at.Time(times / 86400., format='mjd')\n return times.isot, times", "def calOffsets(self, Xi_arr, Vi_arr, hz):\n\n Wi_arr = [round(vi / hz, 6) for vi in Vi_arr] # tcptimestamps in seconds with microsecond precision\n Yi_arr = [(wi - xi) * 1000 for wi, xi in zip(Wi_arr, Xi_arr)] # offset in miliseconds\n offset_arr = [(round(x, 6), round(y, 6)) for x, y in zip(Xi_arr, Yi_arr)]\n return offset_arr", "def ms2pts(ms, dt):\n return int(ms/dt)", "def normalize_timestamp(timestamp_series):\n # convert datetime strings into milliseconds from epoch\n times = pd.to_datetime(timestamp_series, format='%Y-%m-%d %H:%M:%S').astype(np.int64) // int(1e6)\n return times", "def time_and_offset(list_of_ntp_servers):\r\n for ntp_server in list_of_ntp_servers:\r\n offset = clock_offset_from_ntp_server(ntp_server)\r\n if offset:\r\n return time.time(), offset\r\n # None of the ntp servers in the list returned values.\r\n return time.time(), 0.0", "def pcr_delta_time_ms(pcr_t1, pcr_t2, offset = 0):\n return float(pcr_t2-pcr_t1)/90000.0 + offset", "def ts_to_epoch_seconds(t) -> float:\n return t.astype(int) / 1e9", "def _GetTimestamp(radians):\r\n return vf_time.ComputeTimeAtPos(_GetArcCoords(radians))", "def parseLabels(labels):\n\n timestamp = [(parseTimestamp(e['gameTime']), e['label']) for e in labels['annotations']]\n \n return timestamp", "def calculate_timestamps(n_frames, window_shift):\n timestamps = np.arange(window_shift/2, 
n_frames*window_shift, window_shift)\n return timestamps", "def convert_to_utc(timestamp, format):\n epoch = datetime(1950, 1, 1)\n time = []\n dtime = []\n for t in timestamp:\n if format == 'old':\n dt = datetime.strptime(t, '%d/%m/%Y %H:%M:%S')\n elif format == 'new':\n dt = datetime.strptime(t, '%d-%b-%Y %H:%M')\n\n dt = dt - timedelta(hours=10)\n dtime.append(dt)\n time.append((dt - epoch).total_seconds())\n\n time = np.array(time) / 3600. / 24.\n dtime = np.array(dtime)\n return (dtime, time)", "def normalize_time(full_timestamps, half_timestamp):\n phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])\n return phases", "def pts2ms(pts, dt):\n return pts*dt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert datetime objects to GPS timestamp and nanoseconds
def datetime_to_gpstimestamp_nanoseconds(date):
    timestamp = gpstime.utc_to_gps(calendar.timegm(date.utctimetuple()))
    nanosecond = date.microsecond * 1000

    return timestamp, nanosecond
[ "def convert_datetime_to_nanoseconds(datetime_obj):\n\n jan1_2001 = datetime.strptime(\"01-01-2001, 00:00:00.000000\", \"%m-%d-%Y, %H:%M:%S.%f\")\n\n difference = datetime_obj - jan1_2001\n\n UTC_time_diff = 7*60*60 # 7 hours converted into seconds\n\n seconds_difference = (difference.days*24*60*60) + difference.seconds + UTC_time_diff\n\n nanoseconds_difference = (seconds_difference*1000000000) + (difference.microseconds*1000)\n\n return nanoseconds_difference", "def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)", "def _GetTimestamp(radians):\r\n return vf_time.ComputeTimeAtPos(_GetArcCoords(radians))", "def from_Timestamp(timestamp):\n # type: (timestamp_pb2.Timestamp) -> float\n return timestamp.seconds + float(timestamp.nanos) / 10**9", "def ts(self, frame):\n whole = int.from_bytes(self._ts_data[frame, 0], 'little')\n fraction = int.from_bytes(self._ts_data[frame, 1], 'little')\n return np.datetime64(whole * 10**6 + fraction, 'us')", "def date_to_nano(ts):\n return calendar.timegm(ts.utctimetuple()) * int(1e3)", "def get_gps_timestamp(file, time_offset):\n reference_date = get_reference_datetime(file)\n absolute_date = get_absolute_datetime(reference_date, time_offset)\n timestamp, nanosecond = datetime_to_gpstimestamp_nanoseconds(absolute_date)\n\n return timestamp, nanosecond", "def convertToEST(timestamp):\n newDateTime = datetime.datetime.fromtimestamp(timestamp/1000)\n return newDateTime.date(), newDateTime.time()", "def to_timestamp(datetime_value):\n return int((datetime_value - datetime(1970, 1, 1, tzinfo=utc)).total_seconds())", "def _hydrate_time(self, nanoseconds, tz=None):\n seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))\n minutes, seconds = map(int, divmod(seconds, 60))\n hours, minutes = map(int, divmod(minutes, 60))\n seconds = (1000000000 * seconds + nanoseconds) / 1000000000\n t = Time(hours, minutes, seconds)\n if tz is None:\n return t\n tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)\n zone = FixedOffset(tz_offset_minutes)\n return zone.localize(t)", "def convert_time(rawtimes):\n\n # rawtimes is in units of CXC seconds, or seconds since 1998.0\n # Compute the Delta T between 1998.0 (CXC's Epoch) and 1970.0 (Unix Epoch)\n\n seconds_since_1998_0 = rawtimes[0]\n\n cxctime = dt.datetime(1998, 1, 1, 0, 0, 0)\n unixtime = dt.datetime(1970, 1, 1, 0, 0, 0)\n\n # Calculate the first offset from 1970.0, needed by matplotlib's plotdate\n # The below is equivalent (within a few tens of seconds) to the command\n # t0 = Chandra.Time.DateTime(times[0]).unix\n delta_time = (cxctime - unixtime).total_seconds() + seconds_since_1998_0\n\n plotdate_start = epoch2num(delta_time)\n\n # Now we use a relative offset from plotdate_start\n # the number 86,400 below is the number of seconds in a UTC day\n\n times = (np.asarray(rawtimes) - rawtimes[0]) / 86400. 
+ plotdate_start\n\n return times", "def gps2Time(self):\n self.posting_date = Time(self.posting_gpstime, format=\"gps\")", "def convert_datetime(\n date: float | np.ndarray,\n epoch: str | tuple | list | np.datetime64 = _unix_epoch\n ):\n # convert epoch to datetime variables\n if isinstance(epoch, (tuple, list)):\n epoch = np.datetime64(datetime.datetime(*epoch))\n elif isinstance(epoch, str):\n epoch = np.datetime64(parse(epoch))\n # convert to delta time\n return (date - epoch) / np.timedelta64(1, 's')", "def get_times(self, times):\n times = at.Time(times / 86400., format='mjd')\n return times.isot, times", "def to_epoch_milli(self):\n millis = self.seconds * 1000\n return millis + self.nanos / 1000000", "def ts_to_epoch_seconds(t) -> float:\n return t.astype(int) / 1e9", "def get_stamp(float_time):\n\n # Convert to time in seconds (float) to time in nanoseconds (int)\n nsec = rospy.Time.from_sec(float_time)\n\n # Get the number of 'whole' seconds\n sec = int(float_time)\n \n return sec, nsec.nsecs", "def to_pydatetime(self) -> npt.NDArray[np.object_]:\n return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)", "def convert_values_to_timestamp(self, seconds_since_1970,\n fractional_seconds):\n return self.vb.convert_values_to_timestamp(seconds_since_1970,\n fractional_seconds)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the reference datetime from the KNMI LGT file as datetime
def get_reference_datetime(file):
    date_string = file.root.discharge1._f_getAttr('reference_datetime')[0]
    ref_date = datetime.datetime.strptime(date_string, '%d-%b-%Y;%H:%M:%S.%f')

    return ref_date
[ "def _get_harvest_datetime(self, filepath: str) -> str:\n\n filename = os.path.basename(filepath)\n file_tokens = filename.split(\"_\")\n return file_tokens[2][:-5]", "def getPLX_DateTime(fileName, oc=mc):\n\n if not oc:\n oc = matlab.engine.start_matlab()\n else:\n oc.clear(nargout=0)\n oc.cd(str(os.getcwd()), nargout=0)\n\n # DateTime output argument for the plx_information function is the last of 13 produced\n dT_str = oc.plx_information(fileName, nargout=13)[-1]\n\n # Use strptime function to parse the DateTime string into a datetime.datetime function, format\n # string is 'mm/dd/YYYY HH:MM:SS'. Note that I had to modify the `PlexMethods.cpp` file, \n # located in `Matlab Offline Files SDK/mexPlex/` (line 1458) for this to actually work\n dT = datetime.strptime(dT_str, '%m/%d/%Y %H:%M:%S')\n\n return dT", "def _read_antti_datetime(dt_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if dt_file.split('.')[-1] == 'gz':\n ff = gzip.open(dt_file, 'r')\n else:\n ff = open(dt_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n ymdHMS = np.genfromtxt(sIO, comments=\"%\")\n DT = np.array([dt.datetime(*elem) for elem in ymdHMS.astype('int')])\n sIO.close()\n\n return DT", "def _get_rec_datetime(self):\n\n rec_datetime = None\n date_string = ''\n time_string = ''\n datetime_string = ''\n\n if 'notes' not in self.info:\n return None\n\n for note_line in self.info['notes'].split('\\n'):\n\n # episodic acquisition mode\n if note_line.startswith('Created on '):\n date_string = note_line.strip('Created on ')\n if note_line.startswith('Start data acquisition at '):\n time_string = note_line.strip('Start data acquisition at ')\n\n # continuous acquisition mode\n if note_line.startswith('Created : '):\n datetime_string = note_line.strip('Created : ')\n\n if date_string and time_string:\n datetime_string = ' '.join([date_string, time_string])\n\n if datetime_string:\n try:\n rec_datetime = datetime.strptime(datetime_string,\n '%a %b %d %Y %H:%M:%S')\n except ValueError:\n pass\n\n return rec_datetime", "def get_mod_time(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n raw_date_time = file_meta_plist['$objects'][1]['LastModified']\n converted_time = datetime.datetime.fromtimestamp(raw_date_time)\n converted_time = converted_time.timetuple()\n return converted_time\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['modified'].timetuple()", "def getTimestamp(filename):\n\ttxt = re.search(r'IMERG.(\\d+)-S\\d+-E(\\d+)',filename)\n\ttimestring = txt.group(1) + \" \" + txt.group(2)\n\ttime_stamp = pd.to_datetime(timestring, infer_datetime_format = True)\n\treturn time_stamp", "def reference_date(self) -> datetime.datetime:\n tstamp = self.__get_valid_tstamps()\n if tstamp is None:\n raise ValueError('no valid timestamps found')\n\n return datetime.datetime.combine(\n tstamp[0].date(), datetime.time(0), tstamp[0].tzinfo)", "def extract_date_vnp46a2(geotiff_path):\n # Get date (convert YYYYJJJ to YYYYMMDD)\n date = dt.datetime.strptime(\n os.path.basename(geotiff_path)[9:16], \"%Y%j\"\n ).strftime(\"%Y%m%d\")\n\n return date", "def read_datetime(self):\n with GPIOTimingContentManager(self.gpio, start=self._start_tx, end=self._end_tx):\n self._write_byte(self.REG_BURST_READ)\n\n regs = list()\n for _ in range(self.REG_SIZE):\n regs.append(self._read_byte())\n\n # Decode bytes to datetime\n return 
datetime.datetime.strptime(\" \".join([\"{:x}\".format(x) for x in regs]), self.DT_STR_FMT)", "def get_file_date(tree):\n \n date = tree.find(\"{http://www.dspin.de/data/metadata}MetaData/{http://www.dspin.de/data/metadata}source/{http://www.clarin.eu/cmd/}CMD/{http://www.clarin.eu/cmd/}Components/{http://www.clarin.eu/cmd/}teiHeader/{http://www.clarin.eu/cmd/}fileDesc/{http://www.clarin.eu/cmd/}sourceDesc/{http://www.clarin.eu/cmd/}biblFull/{http://www.clarin.eu/cmd/}publicationStmt/{http://www.clarin.eu/cmd/}date\").text \n return date", "def extract_date_vnp46a1(geotiff_path):\n # Get date (convert YYYYJJJ to YYYYMMDD)\n date = dt.datetime.strptime(\n os.path.basename(geotiff_path)[9:16], \"%Y%j\"\n ).strftime(\"%Y%m%d\")\n\n return date", "def reference_time(self):\n if hasattr(self, '_reference_time') is False:\n self._reference_time = self.midtime\n\n return self._reference_time", "def get_datetime_magfile(f):\n \n f = f.split(\"/\")[-1] # look at mag_grid*.out part\n b = f[10:]\n td = datetime.datetime.strptime(b, \"%Y%m%d-%H%M%S.out\")\n\n return td", "def datetime(self):\r\n if 'observation_time_rfc822' in self.data \\\r\n and self.data['observation_time_rfc822']:\r\n tstr = self.data['observation_time_rfc822']\r\n tstr = ' '.join(tstr.split(' ')[:-2])\r\n return datetime.strptime(tstr, '%a, %d %b %Y %H:%M:%S')\r\n elif 'observation_time' in self.data:\r\n return datetime.strptime(self.data['observation_time'] \\\r\n +' %s'%datetime.now().year,\r\n 'Last Updated on %b %d, %H:%M %p %Z %Y')\r\n return ''", "def datetimefromfilename(filename):\n from datetime import datetime\n string = filename.split('timestamped/')[1].split('-CH')[0]\n return datetime.strptime(string, '%Y-%m-%d_%H-%M-%S')", "def import_time(self) -> str:\n return pulumi.get(self, \"import_time\")", "def get_nc_time(nc_filepath):\n try:\n with Dataset(nc_filepath) as f:\n time_var = f.variables['time']\n time_raw = time_var[-1]\n time_units = time_var.units\n time_calendar = getattr(time_var, 'calendar', 'gregorian')\n time_conv = utime(time_units, time_calendar)\n\n return pd.to_datetime(time_conv.num2date(time_raw))\n except Exception as e:\n print(str(e))\n return pd.NaT", "def find_exif_datetime(fname):\n fhandle = open(fname, 'rb')\n tags = exifread.process_file(fhandle, stop_tag='DateTime', details=False)\n for tag in tags.keys():\n if tag == 'Image DateTime':\n return str(tags[tag])\n return", "def convert_to_datetime(line):\n timestamp = re.findall(r\"\\d+\\-\\d+\\-\\w+\\:\\d+\\:\\d+\", line)\n #print('Timestamp extracted is ' + timestamp[0])\n return (datetime.strptime(timestamp[0], \"%Y-%m-%dT%H:%M:%S\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the order's status to "cooking" for the order selected by its id
def cook_order(request):
    order_id = request.GET.get('order_id', 0)
    cs, status = CookStatus.objects.get_or_create(cook_name=request.user)
    if cs.current_order is None:
        cs.current_order = Order.objects.get(id=order_id)
        cs.current_order.status = 'cooking'
        cs.current_order.tikchen = request.user.username
        cs.current_order.save()
        cs.save()

    return HttpResponseRedirect("/staff/cook_order_list/")
[ "def order_ready(request):\n\tcs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n\tif cs.current_order is not None:\n\t\tcs.current_order.status = 'ready-to-serve'\n\t\tcs.current_order.save()\n\t\tcs.current_order = None\n\t\tcs.save()\n\n\treturn HttpResponseRedirect(\"/staff/cook_order_list/\")", "def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()", "def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)", "def change_delivery_status(self, order_id):\n for order in orders:\n if order_id == order['order_id']:\n order['status'] = delivered\n return order\n return False", "def update_specific_order(self,status,order_id):\n self.query = \"UPDATE orders SET order_status=%s WHERE order_id=%s\"\n self.input = (status,order_id) #tuple to support indexing\n self.query_1 = \"SELECT order_id FROM orders ORDER BY order_id DESC LIMIT 1.\"\n self.query_2 = \"SELECT * FROM orders WHERE order_id=%s\"\n self.input_2 = (order_id,) #tuple to support indexing\n self.event = \"admin_update_specific_order\"\n self.error = \"Invalid order id\"\n self.message = \"Successfully updated the order.\"\n self.order_id = order_id\n self.db_error = None", "def order_status(self, order_status):\n\n self._order_status = order_status", "def test_manager_change_order_status(self):\n self.client.force_authenticate(self.user)\n cancel = \"CA\"\n url = reverse('order-set_status', args=[self.order.id])\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_status = Order.objects.values(\"status\").get(pk=self.order.id)\n self.assertEqual(new_status[\"status\"], cancel)\n\n with self.subTest('customer can not change order status'):\n self.user.role = get_user_model().CUSTOMER\n self.client.force_authenticate(self.user)\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result", "def cancel_order(self, order_id):\n for order in orders:\n if order_id == order['order_id']:\n order['status'] = canceled\n return order", "def edit_current_order(self):\n self.header(\"Edit order\")\n orders = self.__order_service.get_orders()\n editing_order = True\n while editing_order:\n if orders:\n self.__order_service.print_current_orders(orders)\n o_id = input(\"Select order by Id (\\33[;31mq to quit\\33[;0m\"\"): \")\n if o_id.lower() == 'q':\n editing_order = False\n break\n if o_id.isdigit():\n order = self.__order_service.get_order_by_id(int(o_id))\n if order:\n edited_order = Order(order[\"Kt\"], order[\"Name\"], order[\"License\"], order[\"From date\"], order[\"To date\"],\n order[\"Price\"], order[\"Insurance\"], order[\"Total price\"], order[\"Days\"])\n a_choice = ''\n while a_choice != 'q':\n print(\"1. Edit PPN/Kt\\n2. Edit name\\n3. Car-license\\n4. From date\\n5. To date\\n6. Price\\n\"\n \"7. Insurance\\n8. 
Days\\n\\n\"\"\\33[;31mPress q to go back \\33[;0m\\n\")\n a_choice = input(\"Choose an option: \").lower()\n if a_choice.lower() == 'q':\n break\n elif a_choice == \"1\":\n edited_order.set_kt(input(\"Enter new Kt: \").translate(remove_punct_map))\n elif a_choice == '2':\n edited_order.set_renter(input(\"Enter new name: \").translate(remove_punct_map))\n elif a_choice == '3':\n edited_order.set_car(input(\"Enter new license: \").translate(remove_punct_map))\n elif a_choice == '4':\n edited_order.set_from_date(\n datetime.datetime.strftime(self.__car_service.user_date(\"Enter new from date: \"), \"%d/%m/%y\"))\n elif a_choice == '5':\n edited_order.set_to_date(\n datetime.datetime.strftime(self.__car_service.user_date(\"Enter new to date: \"), \"%d/%m/%y\"))\n elif a_choice == '6':\n edited_order.set_price(input(\"Enter new price: \").translate(remove_punct_map))\n elif a_choice == '7':\n edited_order.set_insurance(input(\"Enter new insurance \\33[;32mY\\33[;0m/\\33[;31mN\\33\"\n \"[;0m: \").translate(remove_punct_map))\n elif a_choice == '8':\n edited_order.set_days(input(\"Enter number of days: \").translate(remove_punct_map))\n else:\n print(\"\\n\\33[;31mWrong input try again\\33[;0m\\n\")\n self.__order_service.remove_order(o_id)\n self.__order_service.add_order(edited_order, True)\n print(\"\\nOrder edited\\n\")\n editing_order = False\n else:\n print(\"\\n\\33[;31mWrong input try again\\33[;0m\\n\")\n else:\n print(\"\\n\\33[;31mWrong input try again\\33[;0m\\n\")\n else:\n print(\"No orders to edit\\n\")\n input(\"\\33[;32mPress enter to continue \\33[;0m\")\n break\n input(\"\\33[;32mPress enter to continue \\33[;0m\")", "def mark_as_read(request, pk):\n \n order = meds.objects.get(id=pk)\n \n\n if request.method == \"POST\":\n order.read = True\n order.save()\n return redirect('profile')\n \n return render(request, 'confirmation.html', {'item':order})", "async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]", "def item_status(m, mattr):\n\t m.status = item_status_choices[0][0]", "def change_status(self, status, application_id):", "def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')", "def order_status_changed(self, docdataorder, old_status, new_status):\n project_status = appsettings.DOCDATA_ORDER_STATUS_MAPPING.get(new_status, new_status)\n cascade = appsettings.OSCAR_ORDER_STATUS_CASCADE.get(project_status, None)\n\n # Update the order in Oscar\n # Not using Order.set_status(), forcefully set it to the current situation.\n order = Order.objects.get(number=docdataorder.merchant_order_id)\n order.status = 
project_status\n if cascade:\n order.lines.all().update(status=cascade)\n order.save()\n\n # Send the signal\n super(Facade, self).order_status_changed(docdataorder, old_status, new_status)", "def changestatus(request, complaint_id, status):\n if status == '3':\n StudentComplain.objects.filter(id=complaint_id).\\\n update(status=status, worker_id='')\n return HttpResponseRedirect('/complaint/caretaker/')\n elif status == '2':\n StudentComplain.objects.filter(id=complaint_id).\\\n update(status=status, worker_id='')\n return HttpResponseRedirect('/complaint/caretaker/')\n else:\n StudentComplain.objects.filter(id=complaint_id).\\\n update(status=status)\n return HttpResponseRedirect('/complaint/caretaker/')", "def change_status(reservation_id):\n\n db.session.query(Reservation).filter_by(id=reservation_id)\\\n .update({ 'reservation_status': 'Open', 'user_id': None})\n db.session.commit()", "def set_OrderStatus(self, value):\n super(ListOrdersInputSet, self)._set_input('OrderStatus', value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the order's status to "ready-to-serve" for the order selected by its id
def order_ready(request):
    cs, status = CookStatus.objects.get_or_create(cook_name=request.user)
    if cs.current_order is not None:
        cs.current_order.status = 'ready-to-serve'
        cs.current_order.save()
        cs.current_order = None
        cs.save()

    return HttpResponseRedirect("/staff/cook_order_list/")
[ "def change_delivery_status(self, order_id):\n for order in orders:\n if order_id == order['order_id']:\n order['status'] = delivered\n return order\n return False", "async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]", "def order_status(self, order_status):\n\n self._order_status = order_status", "def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result", "def api_mark_order_ready(request, id):\n\n close_old_connections()\n \n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Get the order.\n order = Order.objects.get(id=id)\n \n # Mark the order as served and save it.\n order.ready = not order.ready\n order.save()\n\n close_old_connections()\n\n return HttpResponse('Marked as ready')", "def update_specific_order(self,status,order_id):\n self.query = \"UPDATE orders SET order_status=%s WHERE order_id=%s\"\n self.input = (status,order_id) #tuple to support indexing\n self.query_1 = \"SELECT order_id FROM orders ORDER BY order_id DESC LIMIT 1.\"\n self.query_2 = \"SELECT * FROM orders WHERE order_id=%s\"\n self.input_2 = (order_id,) #tuple to support indexing\n self.event = \"admin_update_specific_order\"\n self.error = \"Invalid order id\"\n self.message = \"Successfully updated the order.\"\n self.order_id = order_id\n self.db_error = None", "def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()", "def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)", "def order_status_changed(self, docdataorder, old_status, new_status):\n project_status = appsettings.DOCDATA_ORDER_STATUS_MAPPING.get(new_status, new_status)\n cascade = appsettings.OSCAR_ORDER_STATUS_CASCADE.get(project_status, None)\n\n # Update the order in Oscar\n # Not using Order.set_status(), forcefully set it to the current situation.\n order = Order.objects.get(number=docdataorder.merchant_order_id)\n order.status = project_status\n if cascade:\n order.lines.all().update(status=cascade)\n order.save()\n\n # Send the signal\n super(Facade, self).order_status_changed(docdataorder, old_status, new_status)", "def order_status(self, obj):\n return obj.order.all()[0].status", "def update_status(status):", "def item_status(m, mattr):\n\t m.status = item_status_choices[0][0]", "def test_manager_change_order_status(self):\n self.client.force_authenticate(self.user)\n cancel = \"CA\"\n url = reverse('order-set_status', args=[self.order.id])\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_status = 
Order.objects.values(\"status\").get(pk=self.order.id)\n self.assertEqual(new_status[\"status\"], cancel)\n\n with self.subTest('customer can not change order status'):\n self.user.role = get_user_model().CUSTOMER\n self.client.force_authenticate(self.user)\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def do(self):\n # print(\"updating quest status\")\n Quest.objects.update_status()", "def confirm(self):\n for item in self.item_set.filter(~Q(statuses__status=sts_const.CONFIRMED)):\n item.change_status(sts_const.CONFIRMED)\n\n signals.order_confirm.send(instance=self, now=timezone.now())", "def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')", "def update_order(self, order):\n order.order_id = self.order_id\n order.average_price = self.avg_execution_price\n order.symbol = self.symbol\n order.side = self.side\n order.type = self.order_type\n order.amount = self.original_amount\n order.price = self.price\n order.filled = self.executed_amount\n order.remaining = self.remaining_amount\n if self.is_cancelled:\n order.status = exchanges.Order.Status.CANCELLED\n elif self.is_live:\n order.status = exchanges.Order.Status.OPEN\n else:\n order.status = exchanges.Order.Status.CLOSED\n return order", "def set_add_dispute_status(self, status):\n self.single_selection_from_kendo_dropdown(self.add_dispute_status_kendo_dropdown_locator, status)\n self.wait_for_ajax_spinner_load()", "def get_order_status(order_id):\n action = request.args.get(\"action\")\n\n if action not in (\"accept\", \"reject\"):\n return jsonify(error=RET.PARAMERR, errmsg=\"参数错误\")\n\n # 1.根据order_id找到对应的订单\n try:\n order = Order.query.filter(Order.id == order_id, Order.status == \"WAIT_ACCEPT\").first()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(error=RET.DBERR, errmsg=\"查询数据错误\")\n if not order:\n return jsonify(error=RET.NODATA, errmsg=\"订单不存在\")\n\n # 2.判断当前的登录用户是否是该订单对应的房东\n user_id = g.user_id\n\n landlord_id = order.house.user_id\n if user_id != landlord_id:\n return jsonify(error=RET.ROLEERR, errmsg=\"不允许修改订单状态\")\n\n # 3.修改订单状态\n if action == \"accept\":\n order.status = \"WAIT_COMMENT\"\n else:\n order.status = \"REJECTED\"\n reason = request.json.get(\"reason\")\n if not reason:\n return jsonify(error=RET.PARAMERR, errmsg=\"请输入拒单原因\")\n order.comment = reason\n # 4.保存到数据库\n try:\n db.session.commit()\n except Exception as e:\n current_app.logger.error(e)\n db.session.rollback()\n return jsonify(error=RET.DBERR, errmsg=\"数据库存储错误\")\n return jsonify(error=RET.OK, errmsg=\"ok\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format a trajectory into a list of tuples before it is stored in memory. A trajectory is a list of (s, a, r, s, d) tuples
def formatTrajectory(self, trajectory):
    return self.RLModel.formatTrajectory(trajectory)
[ "def write_trajectory2(trajectory, positions, velocities, accelerations, effort):\n\n # needed to set a single point in the right format.\n positions = [positions]\n velocities = [velocities]\n accelerations = [accelerations]\n effort = [effort]\n # needed to empty the previous portion of the message which contains the point\n trajectory.points = []\n\n for i in range(0,len(positions)):\n points = JointTrajectoryPoint()\n points.positions = positions[i]\n points.velocities = velocities[i]\n points.accelerations = accelerations[i]\n points.effort = effort[i]\n\n trajectory.points.append(points)\n print(trajectory)", "def parse_trajectory(path: str) -> Optional[List[Dict[str, tuple]]]:\n lines = _get_lines_from_file(path)\n\n ess_file = False\n if path.split('.')[-1] != 'xyz':\n try:\n log = ess_factory(fullpath=path, check_for_errors=False)\n ess_file = True\n except (InputError, RMGInputError):\n ess_file = False\n\n if ess_file:\n if not isinstance(log, GaussianLog):\n raise NotImplementedError(f'Currently parse_trajectory only supports Gaussian files, got {type(log)}')\n traj = list()\n done = False\n i = 0\n while not done:\n if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:\n done = True\n elif 'Input orientation:' in lines[i]:\n i += 5\n xyz_str = ''\n while len(lines) and '--------------------------------------------' not in lines[i]:\n splits = lines[i].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n i += 1\n traj.append(str_to_xyz(xyz_str))\n i += 1\n\n else:\n # this is not an ESS output file, probably an XYZ format file with several Cartesian coordinates\n skip_line = False\n num_of_atoms = 0\n traj, xyz_lines = list(), list()\n for line in lines:\n splits = line.strip().split()\n if len(splits) == 1 and all([c.isdigit() for c in splits[0]]):\n if len(xyz_lines):\n if len(xyz_lines) != num_of_atoms:\n raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '\n f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')\n traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))\n num_of_atoms = int(splits[0])\n skip_line = True\n xyz_lines = list()\n elif skip_line:\n # skip the comment line\n skip_line = False\n continue\n else:\n xyz_lines.append(line)\n\n if len(xyz_lines):\n # add the last point in the trajectory\n if len(xyz_lines) != num_of_atoms:\n raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '\n f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')\n traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))\n\n if not len(traj):\n logger.error(f'Could not parse trajectory from {path}')\n return None\n return traj", "def _print_tisserand_lists(self, Trajectory=[]):\n\t\n\timport numpy as np\n\t\n\tn = len(Trajectory);\n\trpl = [];\n\tral = [];\n\tpl = [];\n\tvinfl = [];\n\tfor i in range(n):\n\t\tral.append(Trajectory[i][6]);\n\t\trpl.append(Trajectory[i][5]);\n\t\tpl.append(Trajectory[i][7]);\n\t\tvinfl.append(Trajectory[i][8]);\n\t\n\tprint 'list_ra_python = [',\n\tn = len(ral);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % ral[i],\n\tprint '%f];' % ral[n-1];\n\t\n\tprint 'list_rp_python = [',\n\tn = len(rpl);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % rpl[i],\n\tprint '%f];' % rpl[n-1];\n\t\n\tprint 'list_period_python = [',\n\tn = len(pl);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % pl[i],\n\tprint '%f];' % pl[n-1];\n\t\n\tprint 
'list_vinf_python = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % np.linalg.norm(vinfl[i]),\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % np.linalg.norm(vinfl[n-1]);\n\t\n\tprint 'list_vinf_python_x = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][0],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][0];\n\t\n\tprint 'list_vinf_python_y = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][1],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][1];\n\t\n\tprint 'list_vinf_python_z = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][2],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][2];", "def format_coords(self, coordinates):\n\n coords = []\n for i in range(0, len(coordinates), 3):\n coords.append(tuple(coordinates[i:i+3]))\n\n return coords", "def convert_shapely_points_to_tuples(list_of_points) -> list:\n return [(p.x, p.y) for p in list_of_points]", "def read_frame_trajectory_file( filename ):\n file = open(filename, \"r\")\n\n timestamps = list()\n path = list()\n\n for line in file:\n # eliminate leading spaces\n line = line.strip()\n\n # ignore comments and empty lines\n if len(line) == 0 or line[0] == '#':\n continue\n\n # divide on whitespace and convert to numbers\n nums = [float(x) for x in line.split()]\n \n # separate out components and build lists\n\n timestamps.append( nums[0] )\n\n origin = list( nums[1:4] )\n unitx = list( nums[4:7] )\n unity = list( nums[7:10] )\n unitz = list( nums[10:13] )\n\n path.append( list( (origin, unitx, unity, unitz ) ) )\n\n return path, timestamps", "def rttm2simple(rttm:list) -> list:\n output = list()\n for line in rttm:\n _, _, _, start, duration, _, _, label, _, _ = line.split()\n end = float(start)+float(duration)\n output.append((f\"{label}\", float(start), end))\n return output", "def get_position_list_from_trajectory(self, trajectories, pos):\n result = list()\n for i in range(len(trajectories)):\n result.append(trajectories[i][pos])\n return result", "def decode_trajectory(self, trajectory, cues=None):\n decodings = []\n\n # now extract the point in the trajectory associated with each cue\n for c in cues:\n decoding = trajectory * self.C ** -c\n decodings.append(decoding)\n\n return decodings", "def dump_ue4_trajectory(name: str, trajectory: typing.Mapping[float, tf.Transform]) -> None:\n with open('unreal_trajectory_{0}.csv'.format(name), 'w') as output_file:\n output_file.write('Name,X,Y,Z,Roll,Pitch,Yaw\\n')\n for idx, timestamp in enumerate(sorted(trajectory.keys())):\n ue_pose = uetf.transform_to_unreal(trajectory[timestamp])\n output_file.write('{name},{x},{y},{z},{roll},{pitch},{yaw}\\n'.format(\n name=idx,\n x=ue_pose.location[0],\n y=ue_pose.location[1],\n z=ue_pose.location[2],\n roll=ue_pose.euler[0],\n pitch=ue_pose.euler[1],\n yaw=ue_pose.euler[2]))", "def to_tuple(self):\n return tuple(self.hopping_sequence_list)", "def get_list(self):\n if self.key == 'L':\n return array_to_list([self.key, self.timing, self.data])\n if self.key == 'T':\n return array_to_list([self.key, self.data, self.timing])\n tmp_data = copy.deepcopy(self.data)\n for i in range(len(self.data)):\n if isinstance(self.data[i], float):\n tmp_data[i] = str('%.3f' % tmp_data[i])\n if tmp_data[i].split('.')[1] == '000':\n tmp_data[i] = tmp_data[i].split('.')[0]\n return array_to_list([self.key, self.easing, self.timing, 
tmp_data])", "def trajectory_to_json(trajectory: Trajectory) -> str:\n # numpy arrays need to be converted to normal tuples\n return json.dumps(trajectory, cls=NumpyEncoder)", "def render_trajectory(self, trajectory):\n colors = ['#a0a0ff', '#7878ff', '#5050ff', '#2828ff', '#0000ff'] # 薄い -> 濃いの順\n\n MAX = 5\n slot = []\n count = 0\n\n for cell in trajectory:\n count += 1\n\n slot.insert(0, cell)\n\n if len(slot) > MAX:\n c = slot.pop(-1)\n self.canvas.itemconfig(c.tag(), fill=c.COLOR)\n\n for i, c in enumerate(reversed(slot)): # 一箇所にとどまった際に先頭の色を反映するため、後ろから更新していく\n self.canvas.itemconfig(c.tag(), fill=colors[i])\n time.sleep(0.1)\n self.label_count['text'] = f'count: {count}'\n self.root.update()\n \n # 軌跡を削除\n for cell in slot:\n self.canvas.itemconfig(cell.tag(), fill=cell.COLOR)", "def getTrip(x):\r\n\ta,b,c=x[0],x[1],x[2]\r\n\tt2=(a,c,b)\r\n\tt3=(b,a,c)\r\n\tt4=(b,c,a)\r\n\tt5=(c,a,b)\r\n\tt6=(c,b,a)\r\n\treturn[t2,t3,t4,t5,t6]", "def __grilleTupleToList__(self):\n return [list(l) for l in self.grille]", "def parseTupleList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"),\",\"*\")\r\n string = string.replace(\"(\", \"\")\r\n string = string.replace(\")\", \"\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"*\")\r\n for i in xrange(len(string)):\r\n string[i] = string[i].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = int(string[i][j])\r\n string[i] = tuple(string[i])\r\n return string", "def read_garda_stations_tuples():\n all_stations = []\n file = open('garda_stations.txt', 'r')\n for line in file:\n line = line.replace('\\n','')\n new_tuple = tuple(line.split('\\t'))\n all_stations.append(new_tuple)\n file.close()\n return all_stations", "def twist_msg_to_tup(self, msg):\n return ((msg.linear.x, msg.linear.y, msg.linear.z),\n (msg.angular.x, msg.angular.y, msg.angular.z))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether the environment is batched or not. If the environment supports batched observations and actions, then overwrite this property to True. A batched environment takes in a batched set of actions and returns a batched set of observations. This means for all numpy arrays in the input and output nested structures, the first dimension is the batch size. When batched, the leftmost dimension is not part of the action_spec or the observation_spec and corresponds to the batch dimension. When batched and handle_auto_reset, it checks `np.all(steps.is_last())`.
def batched(self) -> bool: return False
[ "def is_batch():\n\n pass", "def has_batch(self) -> bool:\n return self.has_any() and (\n self._batch_size is None or self._buffer_size >= self._batch_size\n )", "def get_batch_capable(self):\n return False", "def has_full_batch(self) -> bool:", "def should_handle_all_batches(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_handle_all_batches\")", "def is_batch(self):\n return self.name == 'N'", "def is_batch_job(self):\n\n return isinstance(self.job, Job)", "def consistent_batch_size(shapes) -> bool:\n return same([shape[0] for shape in shapes])", "def dispatch_one_batch(self, iterator):\n\n if self._aborting:\n return False\n\n batch_size = self._get_batch_size()\n\n with self._lock:\n # to ensure an even distribution of the workload between workers,\n # we look ahead in the original iterators more than batch_size\n # tasks - However, we keep consuming only one batch at each\n # dispatch_one_batch call. The extra tasks are stored in a local\n # queue, _ready_batches, that is looked-up prior to re-consuming\n # tasks from the origal iterator.\n try:\n tasks = self._ready_batches.get(block=False)\n except queue.Empty:\n # slice the iterator n_jobs * batchsize items at a time. If the\n # slice returns less than that, then the current batchsize puts\n # too much weight on a subset of workers, while other may end\n # up starving. So in this case, re-scale the batch size\n # accordingly to distribute evenly the last items between all\n # workers.\n n_jobs = self._cached_effective_n_jobs\n big_batch_size = batch_size * n_jobs\n\n islice = list(itertools.islice(iterator, big_batch_size))\n if len(islice) == 0:\n return False\n elif (iterator is self._original_iterator and\n len(islice) < big_batch_size):\n # We reached the end of the original iterator (unless\n # iterator is the ``pre_dispatch``-long initial slice of\n # the original iterator) -- decrease the batch size to\n # account for potential variance in the batches running\n # time.\n final_batch_size = max(1, len(islice) // (10 * n_jobs))\n else:\n final_batch_size = max(1, len(islice) // n_jobs)\n\n # enqueue n_jobs batches in a local queue\n for i in range(0, len(islice), final_batch_size):\n tasks = BatchedCalls(islice[i:i + final_batch_size],\n self._backend.get_nested_backend(),\n self._reducer_callback,\n self._pickle_cache)\n self._ready_batches.put(tasks)\n\n # finally, get one task.\n tasks = self._ready_batches.get(block=False)\n if len(tasks) == 0:\n # No more tasks available in the iterator: tell caller to stop.\n return False\n else:\n self._dispatch(tasks)\n return True", "def can_batch_update(self):\n # type: () -> bool\n return self.batch_flags & BatchFlags.BATCH_UPDATE", "def supports_repository_batch(self):\n return # boolean", "def _batch(self, batch_size):\n transform_or_spec = self._specs.get(\n 'transform_or_spec', self.transform_or_spec)\n if hasattr(transform_or_spec, '_batch'):\n transform_or_spec = transform_or_spec._batch(batch_size)\n return _DeferredTensorSpec(\n self._get_batched_input_spec(batch_size),\n transform_or_spec=transform_or_spec,\n dtype=self.dtype,\n shape=(None if self.shape is None\n else tf.TensorShape([batch_size]).concatenate(self.shape)),\n name=self.name,\n also_track_spec=self._also_track_spec)", "def _implements_predict_batch_hooks(self):\n return not is_default(self.on_predict_batch_begin) or not is_default(\n self.on_predict_batch_end\n )", "def test_batch_size(self):\n return self._test_batch_size", "def batch_size(self):\n return 
self.computation_context.batch_size", "def _implements_train_batch_hooks(self):\n return not is_default(self.on_train_batch_begin) or not is_default(\n self.on_train_batch_end\n )", "def batch_size(self):\n if \"batch_size\" in self.augment_cfg:\n bs = self.augment_cfg[\"batch_size\"]\n logger.warning(f\"Batch Size Overwrite Found: running batch size {bs}\")\n return bs\n else:\n return self.plan[\"batch_size\"]", "def compute_batch(self, duplicate_manager=None,context_manager=None):\n from ...acquisitions import AcquisitionTS\n assert isinstance(self.acquisition, AcquisitionTS)\n \n X_batch,_ = self.acquisition.optimize()\n k=1\n \n # --- GET the remaining elements\n while k<self.batch_size:\n new_sample,_ = self.acquisition.optimize()\n X_batch = np.vstack((X_batch,new_sample))\n k +=1\n \n return X_batch", "def is_started(self):\n return self.batch is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether the Environment should reset given the current timestep. By default it only resets when all time_steps are `LAST`.
def should_reset(self, current_time_step: ts.TimeStep) -> bool: handle_auto_reset = getattr(self, '_handle_auto_reset', False) return handle_auto_reset and np.all(current_time_step.is_last())
[ "def should_reset(self) -> bool:\n changed_team = self._changed_training_team\n if self._changed_training_team:\n self._changed_training_team = False\n return changed_team", "def reset():\n return True", "def get_reset(self):\n return self.is_reset()", "def is_reset_task_states(self, task_state):\r\n return all(self.is_initial_child_state(child) for child in task_state)", "def is_reset(self):\n return self._tag == 'reset'", "def reset(self, **kwargs) -> Union[ObsType, Tuple[ObsType, dict]]:\n if self.checked_reset is False:\n self.checked_reset = True\n return passive_env_reset_check(self.env, **kwargs)\n else:\n return self.env.reset(**kwargs)", "def IsLocalRerun(self):\n return self.prev_test_run_key is not None", "def is_sut_reset_expected(self):\n return self._messagebus.send_request(\n IS_SUT_RESET_EXPECTED, entity=self._sut.entity).wait()[0].result()", "def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n return self.starting_state", "def AutoReset(self) -> bool:", "def force_restart(self) -> Optional[bool]:\n return pulumi.get(self, \"force_restart\")", "def is_reboot(self):\n return self.reboot is True", "def __bool__(self) -> bool:\n if self.initial_value == 1 and self.number_of_steps == 0:\n return True\n return False", "def is_trial_stopped_early(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_trial_stopped_early\")", "def IsRerun(self):\n return self.prev_test_context is not None", "def reset_time_step(self):\n\n self.__time_step = 0", "def reset_game(self):\n\t\treturn 1", "def is_trial_stopped_early(self) -> bool:\n return pulumi.get(self, \"is_trial_stopped_early\")", "def _means_resetting(self, value):\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines the observations provided by the environment. May use a subclass of `ArraySpec` that specifies additional properties such as min and max bounds on the values.
def observation_spec(self) -> types.NestedArraySpec:
[ "def augment_env_spec(cls, env_spec, latent_dim):\n\n aug_obs = akro.Box(low=np.concatenate((env_spec.observation_space.low, \n np.zeros(latent_dim))),\n high=np.concatenate((env_spec.observation_space.high, \n np.ones(latent_dim))),\n dtype=np.float32)\n aug_act = akro.Box(low=env_spec.action_space.low,\n high=env_spec.action_space.high,\n dtype=np.float32)\n return EnvSpec(aug_obs, aug_act)", "def __init__(self, _Obs, _MinSize, _MaxSize, _xmax, _ymax):\n\n self.xmax = _xmax\n self.ymax = _ymax\n\n self.Obs = [{'x': rn.rand()*_xmax,\n 'y': rn.rand()*_ymax,\n 'r': _MinSize + rn.rand()*(_MaxSize - _MinSize)\n } for _ in xrange(_Obs)\n ]", "def test_observation_space(self):\n # 200 buses, 49 generators, 4 shunts.\n n = 200 + 49 + 4\n\n self.assertEqual(\n (n,), self.env.observation_space.shape\n )\n\n # Lower bound should be 0.\n np.testing.assert_array_equal(\n np.zeros(n, dtype=self.env.dtype), self.env.observation_space.low\n )\n\n # Voltage cap at 2.\n np.testing.assert_array_equal(\n np.ones(200, dtype=self.dtype) + 1,\n self.env.observation_space.high[0:200])\n\n # All else at 1 (gen and shunt states)\n np.testing.assert_array_equal(\n np.ones(49+4, dtype=self.dtype),\n self.env.observation_space.high[200:]\n )", "def observation_spec(self) -> Dict[str, Any]:", "def __init__(self):\n super(INumpyArrayMetric, self).__init__()\n self.metric = 'INumpyArrayMetric'\n self.ground_truth = None # np.ndarray\n self.segmentation = None # np.ndarray", "def _default_specs(self):\n # Spectrometer specs\n self.model = \"Flame-S\" # Spectrometer model\n self.fov = None # Field of view fo spectrometer\n self.ILS = None # Number array holding instrument line shape (possibly don't hold this here?)\n self.pix_num = 2048 # Number of pixels\n self.bit_depth = 16 # Bit depth of spectrometer detector\n\n # File information\n self.file_ext = '.npy' # Spectra saved as numpy array\n self.file_ss = '{}ss' # Shutter speed format spec\n self.file_spec_type = {'meas': 'Plume', 'dark': 'Dark', 'cal': 'ppmm', 'clear': 'Clear'}\n self.file_datestr = \"%Y-%m-%dT%H%M%S\" # Date/time format spec in filename\n\n\n\n # Acquisition settings\n self.start_int_time = 100 # Starting integration time\n self.start_coadd = 5 # Number of spectra to coadd\n self.framerate = 1 # Framerate of acquisitions (Hz)\n self.wavelengths = None # Wavelengths (nm)\n self.spectrum = None # Spectrum\n self.spectrum_filename = None # Filename for spectrum\n\n self.auto_int = True # Bool for requesting automated integration time adjustment\n self.min_saturation = 0.5 # Minimum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.max_saturation = 0.9 # Maximum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.saturation_range = [320, 330] # Range of wavelengths used in checking integration time\n\n # Predefined list of integration times for automatic exposure adjustment\n self.int_list = np.concatenate((np.arange(1, 10, 1),\n np.arange(10, 50, 5),\n np.arange(50, 100, 10),\n np.arange(100, 500, 50),\n np.arange(500, 1000, 100),\n np.arange(10 ** 3, 10 ** 4, 500),\n np.array([10 ** 4])))", "def state_array_spec(self) -> Dict[str, Any]:", "def setObservations(self, observations):\n # Turn list of observations into a dict (skipping out any scales which\n # are in the constant list)\n self._observations = {}\n for o in observations:\n level_tuple = ScaleManager._level_tuple(*o[0])\n if level_tuple in ScaleManager._CONSTANT_SCALES:\n continue\n dist_stiff = np.array(o[1:3])\n 
self._observations[level_tuple] = np.column_stack(\n (self._observations[level_tuple], dist_stiff\n )) if level_tuple in self._observations else dist_stiff\n\n # Generate the scales\n self._generateScales()", "def setup_parameters(self):\n # Setting the parameters\n if 'parameters' in self.data:\n for name, values in self.data['parameters'].items():\n\n # Get Shape of Domain\n expected_shape = []\n for domain in getattr(self, name).Domains:\n expected_shape.append(domain.NumberOfPoints)\n\n if isinstance(values, collections.Iterable):\n i = 0\n for value in values:\n getattr(self, name).SetValue(i,value)\n i += 1\n else:\n getattr(self, name).SetValues(values)", "def test_observation_space(self):\n # Test shape.\n self.assertEqual(self.env.observation_space.shape, (self.env.num_obs,))\n\n # Test bounds. Bus voltages should have a high of 2, and the\n # rest should have a high of 1.\n self.assertTrue((self.env.observation_space.high[\n 0:self.env.num_buses] == 2.).all())\n self.assertTrue((self.env.observation_space.high[\n self.env.num_buses:] == 1.).all())\n self.assertTrue((self.env.observation_space.low == 0.).all())", "def observation_spec(self):\n # Get the inner observation spec, which is a dictionary.\n inner_obs_spec = super().observation_spec()\n # Convert it to a tuple of specs, in order of the observation names.\n\n flat_spec = []\n for name in self._observation_names:\n spec = inner_obs_spec[name]\n # For numerical specs, make sure they are an array and not a scalar.\n if spec.dtype != np.dtype(\"<U\") and not spec.shape:\n spec = dm_env_specs.Array(shape=(1,), dtype=spec.dtype, name=spec.name)\n flat_spec.append(spec)\n return tuple(flat_spec)", "def setup_variables(self):\n\n # Setting the specifications\n if 'specifications' in self.data:\n for name, value in self.data['specifications'].items():\n n = getattr(self, name).NumberOfPoints\n if n == 1:\n getattr(self, name).AssignValue(value)\n else:\n for i in range(n):\n getattr(self, name).AssignValue(i, value)\n\n if 'initial_conditions' in self.data:\n for name, value in self.data['initial_conditions'].items():\n n = getattr(self, name).NumberOfPoints\n if n == 1:\n getattr(self, name).SetInitialCondition(value)\n else:\n for i in range(n):\n getattr(self, name).SetInitialCondition(i, value)", "def __init__(self, data_array):\n self._data_array = data_array\n self._units = self._data_array.attrs.get('units', 'dimensionless')", "def __init__(self):\n self.demands = np.zeros(4)\n\n # Different axis maps for Windows and Linux\n self.axis_map = ([1, 2, 3, 0] if platform == \"win32\" else [1, 3, 4, 0])\n # windows controller map\n # axis[0] -> demands[3]\n # -axis[1] -> demand[0]\n # axis [2] -> demand[1]\n # -axis[3] -> demand[2]", "def __init__(self, dims=None, values=None):\n\n self.IS_AOA = False\n\n if values is not None and dims is not None:\n raise ValueError(\"please provide _either_ :dims: or :values:, not both\")\n\n if values is None and dims is None:\n self.values = np.zeros([1, 1])\n\n if values is not None:\n self.construct_values(values)\n\n if dims is not None:\n self.construct_dims(dims)", "def get_env_spec(cls, env_spec, latent_dim, module, max_episode_length):\n\n obs_dim = int(np.prod(env_spec.observation_space.shape))\n action_dim = int(np.prod(env_spec.action_space.shape))\n if module == 'encoder':\n in_dim = max_episode_length*(obs_dim + action_dim)\n out_dim = latent_dim * 2\n in_space = akro.Box(low=-1, high=1, shape=(in_dim, ), dtype=np.float32)\n out_space = akro.Box(low=-1,\n high=1,\n shape=(out_dim, 
),\n dtype=np.float32)\n elif module == 'vf':\n out_dim = 1\n in_space = akro.Box(low=np.concatenate((\n env_spec.observation_space.low, np.zeros(latent_dim))), \n high=np.concatenate((env_spec.observation_space.high, \n np.ones(latent_dim))), \n dtype=np.float64)\n out_space = akro.Box(low=-np.inf,\n high=np.inf,\n shape=(out_dim, ),\n dtype=np.float64)\n if module == 'encoder':\n spec = InOutSpec(in_space, out_space)\n elif module == 'vf':\n spec = EnvSpec(in_space, out_space)\n\n return spec", "def _init_special_vars(self, T_start=None, T_end=None):\n self.min_energy = np.min(self.event_list_T[1][T_start:T_end])\n self.max_energy = np.max(self.event_list_T[1][T_start:T_end])\n self.min_time = np.min(self.event_list_T[0][T_start:T_end])\n self.max_time = np.max(self.event_list_T[0][T_start:T_end])", "def getInputSpecification(cls):\n specs = super().getInputSpecification()\n specs.addSub(InputData.parameterInputFactory('bins', contentType=InputTypes.IntegerType))\n specs.addSub(InputData.parameterInputFactory('variables', contentType=InputTypes.StringListType))\n specs.addSub(InputData.parameterInputFactory('source', contentType=InputTypes.StringType))\n return specs", "def observation_spec(self):\n observation = self.viewer._get_observations() if self.viewer_get_obs else self._get_observations()\n return observation" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current timestep.
def current_time_step(self) -> ts.TimeStep: return self._current_time_step
[ "def current_time_step(self) -> int:\n return self._current_time_step", "def time_step(self):\n return self._time_step", "def GetTimeStep(self):\n time_step = None\n\n time_step = self._solver_collection.GetTimeStep()\n \n if not time_step is None:\n\n self.time_step = time_step\n\n return self.time_step", "def time_step():\n return TimeStep()", "def timestep(self):\n return dt_round( datetime.timedelta(seconds=self.conf_float('dt')) )", "def sim_step(self):\n return traci.simulation.getCurrentTime()/1000 # milliseconds to seconds", "def timestep(self):\n return str(self._timeunit)", "def integration_time(self):\n return self.current_it", "def timestep(self) -> Optional[float]:\n dt = None\n if len(self.time) > 1 and self.is_equidistant:\n dt = (self.time[1] - self.time[0]).total_seconds() # type: ignore\n return dt", "def get_time_step(self):\n for body in self.bodies:\n # If body is a Satelite\n if body.name == \"Satelite\":\n # Assuming that acceleration for a small times step is constant\n t = 0.01 * norm(body.velocity) / norm(body.acc)\n if t < self.t:\n return t\n return self.t", "def current_time(self) -> int:\n return self._simulator.iteration", "def time(self, step: int) -> float:\n return self._start_time + self._parameters.dt*(step - self._start_step)", "def dt(self):\n if isinstance(self._time_axis, are_ax.RegularAxis):\n return self._time_axis.step\n raise RuntimeError(\"Time step is not available for orbits constructed with non-regular time axis\")", "def time_time_step():\n\treturn dsslib.SolutionF(ctypes.c_int32(27), ctypes.c_double(0))", "def full_step_time(self):\n\n total_step_time = self.duration()\n return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))", "def current_step(self):\n return self.dialog.current_step", "def base_step(self):\n if self._base_step is None:\n return get_base_step(self.scale)\n return self._base_step", "def last_step(self):\n return self.last_epoch", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the environment according to the action and returns a `TimeStep`. If the environment returned a `TimeStep` with `StepType.LAST` at the previous step the implementation of `_step` in the environment should call `reset` to start a new sequence and ignore `action`. This method will start a new sequence if called after the environment has been constructed and `reset` has not been called. In this case `action` will be ignored. If `should_reset(current_time_step)` is True, then this method will `reset` by itself. In this case `action` will be ignored.
def step(self, action: types.NestedArray) -> ts.TimeStep: if self._current_time_step is None or self.should_reset( self._current_time_step ): return self.reset() self._current_time_step = self._step(action) return self._current_time_step
[ "def step(self, action):\n action = self.randomization.action_randomizer.randomize(\n action, self._random_state\n )\n\n robot_exception = None\n try:\n self._act(action)\n except RobotException as re:\n logger.error(\n f\"Robot raised exception: {str(re)}. This will finish the current episode.\"\n )\n robot_exception = re\n\n if not self.constants.physical:\n # We don't need to do stepping for physical roll out.\n self.mujoco_simulation.step()\n\n self._synchronize_step_time()\n self.t += 1\n\n obs, reward, done, info = self.get_observation(robot_exception=robot_exception)\n obs, reward, done, info = self.step_finalize(obs, reward, done, info)\n return obs, reward, done, info", "def step(self, action):\n\n if not self._is_action_legal(action):\n return self.current_state, self.reward_illegal_action, self._is_terminal_state(), None\n else:\n # Change action passed if environment should behave random\n if self.stochastic:\n if not np.random.choice([True, False], 1, p=[self.p, 1 - self.p]):\n action = np.random.choice(self.possible_actions)\n\n # Needed for reward calculation (must be done before updating data structures)\n number_of_shifts = self._get_number_of_shifts(action)\n is_cargo_mandatory = int(self.vehicle_data[2][action] == 1)\n\n slot = self.end_of_lanes[self.current_Lane]\n self.loading_sequence += \"{}. Load Vehicle Type \\t {} \\t in Lane: \\t {} \\t Row: \\t {} \\n\" \\\n .format(self.sequence_no, action, self.current_Lane, slot)\n\n self.end_of_lanes[self.current_Lane] += self.vehicle_data[4][action]\n\n if self.vehicle_data[1][action] == -1 or \\\n self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action]:\n self.number_of_vehicles_loaded[action] += 1\n\n self.loaded_vehicles[self.current_Lane][self.vehicle_Counter[self.current_Lane]] = action\n self.vehicle_Counter[self.current_Lane] += 1\n\n # Update grids\n for i in range(self.vehicle_data[4][action]):\n self.grid.T[self.current_Lane][slot + i] = self.sequence_no\n self.grid_destination.T[self.current_Lane][slot + i] = self.vehicle_data[3][action]\n self.grid_vehicle_type.T[self.current_Lane][slot + i] = self.vehicle_data[0][action]\n\n # Update lowest destination data structure\n if self.vehicle_data[3][action] < self.lowest_destination[self.current_Lane]:\n self.lowest_destination[self.current_Lane] = self.vehicle_data[3][action]\n\n self.sequence_no += 1\n # Update according to lane selection heuristic\n self.current_Lane = self._get_minimal_lanes()[0]\n\n self.possible_actions = self.get_possible_actions_of_state()\n self.current_state = self._get_current_state()\n\n if self._is_terminal_state():\n # Calculate reward for terminal state\n free_spaces = np.sum(self._get_free_capacity()) / np.sum(self.total_capacity)\n mandatory_vehicles_left_to_load = np.sum(self.vehicle_data[1][self.mandatory_cargo_mask]\n - self.number_of_vehicles_loaded[self.mandatory_cargo_mask])\n reward_features = np.array(\n [is_cargo_mandatory, number_of_shifts, free_spaces, mandatory_vehicles_left_to_load])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, True, {}\n else:\n # Calculate reward\n reward_features = np.array([is_cargo_mandatory, number_of_shifts, 0, 0])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, False, {}", "def step(self, action):\n\n origin_state = self.env.current_state\n new_state, reward, episode_done = self.env.execute_action(action)\n\n self._update_Q(origin_state, action, new_state, 
reward)\n\n return new_state, reward, episode_done", "def reset_environment(self, env):\r\n\r\n state = env.reset()\r\n #state = state[0]\r\n #state = np.expand_dims(state, axis=0)\r\n\r\n for _ in range(np.random.randint(self.start_random_steps)):\r\n action = env.action_space.sample() # sample random action\r\n next_state, _, _, _ = env.step(action)\r\n #next_state = next_state[0]\r\n #next_state = np.expand_dims(next_state, axis=0)\r\n state = next_state\r\n\r\n return state", "def step(self, action: list) -> None:\n self._input = np.array(\n [self._thrust_surge(action[0]), self._moment_steer(action[1])]\n )\n w, q = odesolver45(\n self._state_dot, self._state, self.config.simulation.t_step_size\n )\n\n self._state = q\n self._state[2] = geom.princip(self._state[2])\n\n self._prev_states = np.vstack([self._prev_states, self._state])\n self._prev_inputs = np.vstack([self._prev_inputs, self._input])\n\n self._step_counter += 1", "def _env_move(self, action: str, **kwargs):\n\n # Update counter\n self._counters[action] += 1\n\n # Define step and times\n ref = self._refs[action]\n now = self.steps[action]\n\n step = int(ref / now) if ref > now else 1\n times = 1 if ref > now else int(now / ref)\n\n # Update agent in environment, if necessary\n if step == 1 or self._counters[action] % step:\n self.env.move(action=action, times=times, **kwargs)", "def reset_environment(self, env):\r\n\r\n state = env.reset()\r\n state = np.expand_dims(state, axis=1)\r\n\r\n for _ in range(np.random.randint(self.start_random_steps)):\r\n action = env.action_space.sample() # sample random action\r\n next_state, _, _, _ = env.step(action)\r\n next_state = np.expand_dims(next_state, axis=1) # this is required because it is 1 x traffic state size\r\n state = next_state\r\n\r\n return state", "def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n return self.starting_state", "def reset_time_step(self):\n\n self.__time_step = 0", "def step(self, action: int) -> None:\n\n assert self.action_space.contains(action), f'{action} ({type(action)}) invalid'\n # Check for void action, if so do not do anything\n if self.with_void and action == self.action_size - 1:\n logging.warning('Void action selected!')\n else:\n # Set actions as given by the key\n keys = self.actions[action]\n self.workload_manager.job_scheduler.sorting_key = keys[0]\n self.workload_manager.resource_manager.sorting_key = keys[1]\n # Schedule jobs\n self.workload_manager.schedule_jobs()", "def step(self, reward, observation):\n self._last_observation = self._observation\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._store_transition(\n self._last_observation, self.action, reward, False,\n replay_buffer.PLACEHOLDER_RETURN_VALUE, self._curr_episode)\n self._train_step()\n\n self.action = self._select_action()\n return self.action", "def _time_step_to_initial_observation(\n self,\n time_step: TimeStep,\n environment_model: EnvironmentModel,\n ):\n\n observation = time_step.observation\n batch_size = get_outer_shape(observation, environment_model.observation_spec())\n # the time step comes from the real environment\n assert batch_size == (\n 1,\n ), f\"batch_size of time_step.observation = {batch_size} and it should be 1\"\n initial_observation = tf.repeat(observation, repeats=self._batch_size, axis=0)\n return initial_observation", "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the 
observations result of performing that action.\n \"\"\"\n #if self.step_number > 200:\n #self.reset()\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self.gazebo.unpauseSim()\n self._set_action(action)\n #self._prey_step()\n self.gazebo.pauseSim()\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n \n self.cumulated_episode_reward = self.cumulated_episode_reward+ reward\n self.step_number += 1\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info", "def interaction_step(self, state, env):\n action = self.training_strategy.select_action(self.online_model, state)\n new_state, reward, is_terminal, info = env.step(action)\n\n # ?\n # is_truncated = 'TimeLimit.truncated' in info and info['TimeLimit.truncated']\n # is_failure = is_terminal and not is_truncated\n # experience = (state, action, reward, new_state, float(is_failure))\n\n experience = (state, action, reward, new_state, float(is_terminal))\n\n # Store data\n self.replay_buffer.store(experience)\n self.episode_reward[-1] += reward\n self.episode_timestep[-1] += 1\n self.episode_exploration[-1] += int(self.training_strategy.exploratory_action_taken)\n\n return new_state, is_terminal", "def step(self, actions):\n results = [env.step(a) for env, a in zip(self.envs, actions)]\n new_obs, rewards, done, infos = map(np.array, zip(*results))\n\n # reset environments automatically\n for i in range(len(self.envs)):\n if done[i]:\n new_obs[i] = self.envs[i].reset()\n\n return new_obs, rewards, done, infos", "def step(self, action):\n self._agent.act(action)\n\n self._client.release()\n self._client.acquire()\n\n return self._get_single_state()", "def step(self, reward, observation):\n self._last_observation = self._observation\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._store_transition(self._last_observation, self.action, reward, False)\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn)\n self.action = onp.asarray(self.action)\n return self.action", "def step(self, action):\n assert action in (0, 1, 2, 3), (\"action must be 0 (up), 1 (right), \"\n \"2 (down), or 3 (left); \"\n \"not {}\".format(action))\n\n # Check if already finished\n if self.done:\n print('WARNING: calling step() even though the environment has '\n 'already returned done=True. 
Call reset() first before '\n 'step() again: anything beyond this is undefined behaviour.')\n\n # Get current state\n state = self.state\n row = state % 4\n col = state // 4\n\n # Do a step on the grid as given by action\n # TODO: Add slipperyness\n if action == 0: # up\n if row != 0:\n row -= 1\n elif action == 1: # right\n if col != len(self.grid[0]) - 1:\n col += 1\n elif action == 2: # down\n if row != len(self.grid) - 1:\n row += 1\n elif action == 3: # left\n if col != 0:\n col -= 1\n\n self.state = len(self.grid) * row + col\n\n # Reached goal?\n if self.grid[row][col] == 'G':\n return self.state, 1.0, True\n\n # Fallen into hole?\n done = (self.grid[row][col] == 'H')\n\n # Reward only 1.0 if goal is reached\n reward = 0.0\n\n return self.state, reward, done", "def step(self, action: Action) -> Feedback: # type: ignore\n self._action_counter += 1\n step_id = self._action_counter\n\n self._encode_and_send_action(action, step_id)\n\n # Wait (blocking!) for the response envelope from the environment\n in_envelope = self._queue.get(block=True, timeout=None) # type: Envelope\n\n msg = self._decode_percept(in_envelope, step_id)\n\n observation, reward, done, info = self._message_to_percept(msg)\n\n return observation, reward, done, info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs the forward flow of a given input through the network; all intermediate data are stored in the layers' "y" and "v".
def flow(input_): global number_of_neurons_by_layer if len(input_) != number_of_neurons_by_layer[0]: raise IndexError( f"\033[91mInput length is incorrect. It must be {number_of_neurons_by_layer[0]}.\033[m") layers[0]["y"][1:] = np.array(input_).flatten().reshape(len(input_), 1) for i_lay in range(1, len(layers)): layers[i_lay]["v"][:] = logistic( layers[i_lay]["weigths"] @ layers[i_lay-1]["y"] )
[ "def trainNet():", "def epoch(self, v, expected):\n self.V = []\n self.O_hidden = []\n self.O_output = []\n self.D_1 = []\n\n self.error = []\n\n\n self.forward(np.transpose([v]), np.transpose([expected]))\n self.backward()", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z", "def inference(input_layer, para_data, train_phase, keep_prob):", "def train(self, X, y):", "def _feed_forward(self, inp):\n self._get_input_layer().forward(inp)\n self._get_output_layer().forward(self._get_input_layer().outp)", "def test_propagate(self):\n # Get network components\n data = array([[0], [1]])\n cdata = LabeledCData(data, labels=array([0, 1]))\n encoder = BinaryEncoding(cdata)\n unitary = ProductAnsatz(1)\n measure = Measurement(1, [0])\n qnn = Network([encoder, unitary, measure], \"1q-qvm\")\n\n # Propagate the zeroth data point\n out = qnn.propagate(0, shots=10)\n\n print(out)", "def forward(self, x):\n x=x.view(len(x),1,28,28)\n out=self.encoder(x)\n out=self.decoder(out)\n #print('out after decoder',out.shape)\n out=out.view(len(out),784)\n return out", "def run_neural_network(self, rover_input, weight_vec, rover_id):\n self.get_inputs(rover_input, rover_id)\n self.get_weights(weight_vec, rover_id)\n self.get_outputs(rover_id)", "def forward(self, x):\n sources = list()\n\n # apply vgg up to conv4_3 relu\n for k in range(23):\n x = self.vgg[k](x)\n\n # s = self.L2Norm(x)\n s = x / 
x.norm(dim=1, keepdim=True)\n sources.append(s)\n # print(f'Adding1 of dim {s.shape}')\n\n # apply vgg up to fc7\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n # print(f'Adding2 of dim {x.shape}')\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n # print(f'Adding3 of dim {x.shape}')\n\n out_sources = [self.fproj1(sources[0]), self.fproj2(\n sources[1]), self.fproj3(sources[2])] + sources[3:]\n if self.cfg['resize_img'][0] >= 600:\n # To Reduce the computation\n return out_sources[1:]\n return out_sources", "def neural_network(self):\n D_in, H, D_out = self.num_states, 32, self.num_actions\n print(\"Neural net nodes: \", D_in, H, D_out)\n optimizer = keras.optimizers.Adam(lr=self.lr)\n self.model = keras.models.Sequential()\n self.model.add(\n keras.layers.Dense(\n H,\n input_dim=D_in,\n activation=\"relu\"))\n self.model.add(keras.layers.Dense(H, activation=\"relu\"))\n self.model.add(keras.layers.Dense(D_out, activation=\"linear\"))\n self.model.compile(loss=\"mse\", optimizer=optimizer)", "def build_UNet_graph(self):\n args = self.args\n # 1. unit \n with tf.name_scope('1.unit'):\n W1_1 = self.weight_variable([3,3,self.input_shape[2],16], 'W1_1')\n b1_1 = self.bias_variable([16], 'b1_1')\n Z1 = self.conv2d(self.x_data_tf, W1_1, 'Z1') + b1_1\n A1 = self.activation(self.batch_norm_layer(Z1)) # (.,128,128,16)\n A1_drop = self.dropout_layer(A1)\n W1_2 = self.weight_variable([3,3,16,16], 'W1_2')\n b1_2 = self.bias_variable([16], 'b1_2')\n Z2 = self.conv2d(A1_drop, W1_2, 'Z2') + b1_2\n A2 = self.activation(self.batch_norm_layer(Z2)) # (.,128,128,16)\n P1 = self.max_pool_2x2(A2, 'P1') # (.,64,64,16)\n # 2. unit \n with tf.name_scope('2.unit'):\n W2_1 = self.weight_variable([3,3,16,32], \"W2_1\")\n b2_1 = self.bias_variable([32], 'b2_1')\n Z3 = self.conv2d(P1, W2_1) + b2_1\n A3 = self.activation(self.batch_norm_layer(Z3)) # (.,64,64,32)\n A3_drop = self.dropout_layer(A3)\n W2_2 = self.weight_variable([3,3,32,32], \"W2_2\")\n b2_2 = self.bias_variable([32], 'b2_2')\n Z4 = self.conv2d(A3_drop, W2_2) + b2_2\n A4 = self.activation(self.batch_norm_layer(Z4)) # (.,64,64,32)\n P2 = self.max_pool_2x2(A4) # (.,32,32,32)\n # 3. unit\n with tf.name_scope('3.unit'):\n W3_1 = self.weight_variable([3,3,32,64], \"W3_1\")\n b3_1 = self.bias_variable([64], 'b3_1')\n Z5 = self.conv2d(P2, W3_1) + b3_1\n A5 = self.activation(self.batch_norm_layer(Z5)) # (.,32,32,64)\n A5_drop = self.dropout_layer(A5)\n W3_2 = self.weight_variable([3,3,64,64], \"W3_2\")\n b3_2 = self.bias_variable([64], 'b3_2')\n Z6 = self.conv2d(A5_drop, W3_2) + b3_2\n A6 = self.activation(self.batch_norm_layer(Z6)) # (.,32,32,64)\n P3 = self.max_pool_2x2(A6) # (.,16,16,64)\n # 4. unit\n with tf.name_scope('4.unit'):\n W4_1 = self.weight_variable([3,3,64,128], \"W4_1\")\n b4_1 = self.bias_variable([128], 'b4_1')\n Z7 = self.conv2d(P3, W4_1) + b4_1\n A7 = self.activation(self.batch_norm_layer(Z7)) # (.,16,16,128)\n A7_drop = self.dropout_layer(A7)\n W4_2 = self.weight_variable([3,3,128,128], \"W4_2\")\n b4_2 = self.bias_variable([128], 'b4_2')\n Z8 = self.conv2d(A7_drop, W4_2) + b4_2\n A8 = self.activation(self.batch_norm_layer(Z8)) # (.,16,16,128)\n P4 = self.max_pool_2x2(A8) # (.,8,8,128)\n # 5. 
unit \n with tf.name_scope('5.unit'):\n W5_1 = self.weight_variable([3,3,128,256], \"W5_1\")\n b5_1 = self.bias_variable([256], 'b5_1')\n Z9 = self.conv2d(P4, W5_1) + b5_1\n A9 = self.activation(self.batch_norm_layer(Z9)) # (.,8,8,256)\n A9_drop = self.dropout_layer(A9)\n W5_2 = self.weight_variable([3,3,256,256], \"W5_2\")\n b5_2 = self.bias_variable([256], 'b5_2')\n Z10 = self.conv2d(A9_drop, W5_2) + b5_2\n A10 = self.activation(self.batch_norm_layer(Z10)) # (.,8,8,256)\n \n # 6. unit\n with tf.name_scope('6.unit'):\n W6_1 = self.weight_variable([3,3,256,128], \"W6_1\")\n b6_1 = self.bias_variable([128], 'b6_1')\n U1 = self.conv2d_transpose(A10, 128) # (.,16,16,128)\n U1 = tf.concat([U1, A8], 3) # (.,16,16,256)\n Z11 = self.conv2d(U1, W6_1) + b6_1\n A11 = self.activation(self.batch_norm_layer(Z11)) # (.,16,16,128)\n A11_drop = self.dropout_layer(A11)\n W6_2 = self.weight_variable([3,3,128,128], \"W6_2\")\n b6_2 = self.bias_variable([128], 'b6_2')\n Z12 = self.conv2d(A11_drop, W6_2) + b6_2\n A12 = self.activation(self.batch_norm_layer(Z12)) # (.,16,16,128)\n # 7. unit \n with tf.name_scope('7.unit'):\n W7_1 = self.weight_variable([3,3,128,64], \"W7_1\")\n b7_1 = self.bias_variable([64], 'b7_1')\n U2 = self.conv2d_transpose(A12, 64) # (.,32,32,64)\n U2 = tf.concat([U2, A6],3) # (.,32,32,128)\n Z13 = self.conv2d(U2, W7_1) + b7_1\n A13 = self.activation(self.batch_norm_layer(Z13)) # (.,32,32,64)\n A13_drop = self.dropout_layer(A13)\n W7_2 = self.weight_variable([3,3,64,64], \"W7_2\")\n b7_2 = self.bias_variable([64], 'b7_2')\n Z14 = self.conv2d(A13_drop, W7_2) + b7_2\n A14 = self.activation(self.batch_norm_layer(Z14)) # (.,32,32,64)\n # 8. unit\n with tf.name_scope('8.unit'):\n W8_1 = self.weight_variable([3,3,64,32], \"W8_1\")\n b8_1 = self.bias_variable([32], 'b8_1')\n U3 = self.conv2d_transpose(A14, 32) # (.,64,64,32)\n U3 = tf.concat([U3, A4],3) # (.,64,64,64)\n Z15 = self.conv2d(U3, W8_1) + b8_1\n A15 = self.activation(self.batch_norm_layer(Z15)) # (.,64,64,32)\n A15_drop = self.dropout_layer(A15)\n W8_2 = self.weight_variable([3,3,32,32], \"W8_2\")\n b8_2 = self.bias_variable([32], 'b8_2')\n Z16 = self.conv2d(A15_drop, W8_2) + b8_2\n A16 = self.activation(self.batch_norm_layer(Z16)) # (.,64,64,32)\n # 9. unit \n with tf.name_scope('9.unit'):\n W9_1 = self.weight_variable([3,3,32,16], \"W9_1\")\n b9_1 = self.bias_variable([16], 'b9_1')\n U4 = self.conv2d_transpose(A16, 16) # (.,128,128,16)\n U4 = tf.concat([U4, A2],3) # (.,128,128,32)\n Z17 = self.conv2d(U4, W9_1) + b9_1\n A17 = self.activation(self.batch_norm_layer(Z17)) # (.,128,128,16)\n A17_drop = self.dropout_layer(A17)\n W9_2 = self.weight_variable([3,3,16,16], \"W9_2\")\n b9_2 = self.bias_variable([16], 'b9_2')\n Z18 = self.conv2d(A17_drop, W9_2) + b9_2\n A18 = self.activation(self.batch_norm_layer(Z18)) # (.,128,128,16)\n # 10. 
unit: output layer\n with tf.name_scope('10.unit'):\n W10 = self.weight_variable([1,1,16,args.num_class], \"W10\")\n b10 = self.bias_variable([args.num_class], 'b10')\n Z19 = self.conv2d(A18, W10) + b10\n A19 = tf.nn.sigmoid(self.batch_norm_layer(Z19)) # (.,128,128,1)\n print(A19.shape)\n \n self.z_pred_tf = tf.identity(Z19, name='z_pred_tf') # (.,128,128,1)\n self.y_pred_tf = tf.identity(A19, name='y_pred_tf') # (.,128,128,1)\n print(self.y_pred_tf.shape)\n \n print('Build UNet Graph: 10 layers, {} trainable weights'.format(\n self.num_of_weights([W1_1,b1_1,W1_2,b1_2,W2_1,b2_1,W2_2,b2_2,\n W3_1,b3_1,W3_2,b3_2,W4_1,b4_1,W4_2,b4_2,\n W5_1,b5_1,W5_2,b5_2,W6_1,b6_1,W6_2,b6_2,\n W7_1,b7_1,W7_2,b7_2,W8_1,b8_1,W8_2,b8_2,\n W9_1,b9_1,W9_2,b9_2,W10,b10])))", "def __init__(self,sess,vgg_path,vgg=\"True\",num_classes=2):\n\n output_channels=num_classes #This is number of classes of the output mask\n\n self.correct_label_mask = tf.placeholder(dtype = tf.float32, shape = (None, None, None, num_classes))\n if vgg:\n vgg_tag = 'vgg16'\n \n\n #names of differnet layers and initial values of pre-trained vgg16 net\n vgg_input_tensor_name = 'image_input:0' \n vgg_keep_prob_tensor_name = 'keep_prob:0' \n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n\n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n\n graph = tf.get_default_graph()\n #load tensors (by name) for required layers\n self.Input = graph.get_tensor_by_name(vgg_input_tensor_name) #input layer\n self.keep = graph.get_tensor_by_name(vgg_keep_prob_tensor_name) #Differnet layer information\n self.layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)\n self.layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)\n self.layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)\n else: \n print(\"Modigy the fcn_model function since you don't use VGG fine tune\") \n\n self.fcn_model(output_channels)", "def test_net_backpropagation_one_input(self):\n net = ecn.NeuralNet(2, (2,), 1)\n net.weights = self._set_initial_weights()\n \n dataset = [[1, 1]]\n targets = [[0]]\n \n net.train(dataset, targets, 0.5, 1)\n self.assertTrue((net.weights['h0'][0] == [-0.5934, 0.4066, 0.5066]).all())\n self.assertTrue((net.weights['h0'][1] == [-0.2097, 0.7903, 0.7903]).all())\n self.assertTrue((net.weights['y'][0] == [-0.3679, -0.4390, 0.8456]).all())\n print('Finished testing backpropagation one input\\n')", "def _build_forward_graph(self):\n\n print('[*] Building a Neural Turing Machine.')\n\n self._initalize_state()\n\n # present start token\n controller_out = self.controller.emit_feature_vector(self.start_token, self.r_t[0], reuse=None)\n self._read_write(controller_out, reuse=None)\n\n # present inputs\n print('Input chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.inputs[t], self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present end token\n controller_out = self.controller.emit_feature_vector(self.end_token, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present outputs\n print('Output chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.zeros, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n reuse = None if t == 0 else 
True\n self.outputs.append(self._decode_read_vector(self.r_t[-1], reuse=reuse))\n print('Done.')", "def build_PF_net(input_shape0, input_shape1, input_shape2, input_shape3, \n filters_1=5):\n \n \n # Create the placeholders for the inputs\n inputs0 = Input( shape=input_shape0 )\n inputs1 = Input( shape=input_shape1 )\n inputs2 = Input( shape=input_shape2 )\n inputs3 = Input( shape=input_shape3 )\n \n # Create the encoder branches \n branch0 = encoder( inputs0, filters_1 )\n branch1 = encoder( inputs1, filters_1 )\n branch2 = encoder( inputs2, filters_1 )\n branch3 = encoder( inputs3, filters_1 )\n \n # Concatenate the residual units of e/branch for the skip connections \n branch_sum_0 = concatenate( [branch0[0], branch1[0], branch2[0], \n branch3[0] ], axis=4)\n \n branch_sum_1 = concatenate( [branch0[1], branch1[1], branch2[1], \n branch3[1] ], axis=4)\n \n branch_sum_2 = concatenate( [branch0[2], branch1[2], branch2[2], \n branch3[2] ], axis=4)\n \n # Create bridge between encoder and decoder\n path = res_block(branch_sum_2, [filters_1*8, filters_1*8, filters_1*8], \n [(2, 2, 2), (1, 1, 1)])\n\n # Create decoder branch\n path = decoder(path, branch_sum_0, branch_sum_1, branch_sum_2, filters_1)\n\n # Last filter, this outputs the velocity in Z-direction\n # for pressure or the full velocity tensor, one could change the \n # number of filters to > 1\n path = Conv3D(filters=1, kernel_size=(1, 1, 1), activation='selu')(path)\n\n return Model(inputs=[inputs0,inputs1,inputs2,inputs3], outputs=path)", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def build_UNet_graph(self):\n # 1. unit \n with tf.name_scope('1.unit'):\n W1_1 = self.weight_variable([3,3,self.input_shape[2],16], 'W1_1')\n b1_1 = self.bias_variable([16], 'b1_1')\n Z1 = self.conv2d(self.x_data_tf, W1_1, 'Z1') + b1_1\n A1 = self.activation(self.batch_norm_layer(Z1)) # (.,128,128,16)\n A1_drop = self.dropout_layer(A1)\n W1_2 = self.weight_variable([3,3,16,16], 'W1_2')\n b1_2 = self.bias_variable([16], 'b1_2')\n Z2 = self.conv2d(A1_drop, W1_2, 'Z2') + b1_2\n A2 = self.activation(self.batch_norm_layer(Z2)) # (.,128,128,16)\n P1 = self.max_pool_2x2(A2, 'P1') # (.,64,64,16)\n # 2. unit \n with tf.name_scope('2.unit'):\n W2_1 = self.weight_variable([3,3,16,32], \"W2_1\")\n b2_1 = self.bias_variable([32], 'b2_1')\n Z3 = self.conv2d(P1, W2_1) + b2_1\n A3 = self.activation(self.batch_norm_layer(Z3)) # (.,64,64,32)\n A3_drop = self.dropout_layer(A3)\n W2_2 = self.weight_variable([3,3,32,32], \"W2_2\")\n b2_2 = self.bias_variable([32], 'b2_2')\n Z4 = self.conv2d(A3_drop, W2_2) + b2_2\n A4 = self.activation(self.batch_norm_layer(Z4)) # (.,64,64,32)\n P2 = self.max_pool_2x2(A4) # (.,32,32,32)\n # 3. unit\n with tf.name_scope('3.unit'):\n W3_1 = self.weight_variable([3,3,32,64], \"W3_1\")\n b3_1 = self.bias_variable([64], 'b3_1')\n Z5 = self.conv2d(P2, W3_1) + b3_1\n A5 = self.activation(self.batch_norm_layer(Z5)) # (.,32,32,64)\n A5_drop = self.dropout_layer(A5)\n W3_2 = self.weight_variable([3,3,64,64], \"W3_2\")\n b3_2 = self.bias_variable([64], 'b3_2')\n Z6 = self.conv2d(A5_drop, W3_2) + b3_2\n A6 = self.activation(self.batch_norm_layer(Z6)) # (.,32,32,64)\n P3 = self.max_pool_2x2(A6) # (.,16,16,64)\n # 4. 
unit\n with tf.name_scope('4.unit'):\n W4_1 = self.weight_variable([3,3,64,128], \"W4_1\")\n b4_1 = self.bias_variable([128], 'b4_1')\n Z7 = self.conv2d(P3, W4_1) + b4_1\n A7 = self.activation(self.batch_norm_layer(Z7)) # (.,16,16,128)\n A7_drop = self.dropout_layer(A7)\n W4_2 = self.weight_variable([3,3,128,128], \"W4_2\")\n b4_2 = self.bias_variable([128], 'b4_2')\n Z8 = self.conv2d(A7_drop, W4_2) + b4_2\n A8 = self.activation(self.batch_norm_layer(Z8)) # (.,16,16,128)\n P4 = self.max_pool_2x2(A8) # (.,8,8,128)\n # 5. unit \n with tf.name_scope('5.unit'):\n W5_1 = self.weight_variable([3,3,128,256], \"W5_1\")\n b5_1 = self.bias_variable([256], 'b5_1')\n Z9 = self.conv2d(P4, W5_1) + b5_1\n A9 = self.activation(self.batch_norm_layer(Z9)) # (.,8,8,256)\n A9_drop = self.dropout_layer(A9)\n W5_2 = self.weight_variable([3,3,256,256], \"W5_2\")\n b5_2 = self.bias_variable([256], 'b5_2')\n Z10 = self.conv2d(A9_drop, W5_2) + b5_2\n A10 = self.activation(self.batch_norm_layer(Z10)) # (.,8,8,256)\n # 6. unit\n with tf.name_scope('6.unit'):\n W6_1 = self.weight_variable([3,3,256,128], \"W6_1\")\n b6_1 = self.bias_variable([128], 'b6_1')\n U1 = self.conv2d_transpose(A10, 128) # (.,16,16,128)\n U1 = tf.concat([U1, A8], 3) # (.,16,16,256)\n Z11 = self.conv2d(U1, W6_1) + b6_1\n A11 = self.activation(self.batch_norm_layer(Z11)) # (.,16,16,128)\n A11_drop = self.dropout_layer(A11)\n W6_2 = self.weight_variable([3,3,128,128], \"W6_2\")\n b6_2 = self.bias_variable([128], 'b6_2')\n Z12 = self.conv2d(A11_drop, W6_2) + b6_2\n A12 = self.activation(self.batch_norm_layer(Z12)) # (.,16,16,128)\n # 7. unit \n with tf.name_scope('7.unit'):\n W7_1 = self.weight_variable([3,3,128,64], \"W7_1\")\n b7_1 = self.bias_variable([64], 'b7_1')\n U2 = self.conv2d_transpose(A12, 64) # (.,32,32,64)\n U2 = tf.concat([U2, A6],3) # (.,32,32,128)\n Z13 = self.conv2d(U2, W7_1) + b7_1\n A13 = self.activation(self.batch_norm_layer(Z13)) # (.,32,32,64)\n A13_drop = self.dropout_layer(A13)\n W7_2 = self.weight_variable([3,3,64,64], \"W7_2\")\n b7_2 = self.bias_variable([64], 'b7_2')\n Z14 = self.conv2d(A13_drop, W7_2) + b7_2\n A14 = self.activation(self.batch_norm_layer(Z14)) # (.,32,32,64)\n # 8. unit\n with tf.name_scope('8.unit'):\n W8_1 = self.weight_variable([3,3,64,32], \"W8_1\")\n b8_1 = self.bias_variable([32], 'b8_1')\n U3 = self.conv2d_transpose(A14, 32) # (.,64,64,32)\n U3 = tf.concat([U3, A4],3) # (.,64,64,64)\n Z15 = self.conv2d(U3, W8_1) + b8_1\n A15 = self.activation(self.batch_norm_layer(Z15)) # (.,64,64,32)\n A15_drop = self.dropout_layer(A15)\n W8_2 = self.weight_variable([3,3,32,32], \"W8_2\")\n b8_2 = self.bias_variable([32], 'b8_2')\n Z16 = self.conv2d(A15_drop, W8_2) + b8_2\n A16 = self.activation(self.batch_norm_layer(Z16)) # (.,64,64,32)\n # 9. unit \n with tf.name_scope('9.unit'):\n W9_1 = self.weight_variable([3,3,32,16], \"W9_1\")\n b9_1 = self.bias_variable([16], 'b9_1')\n U4 = self.conv2d_transpose(A16, 16) # (.,128,128,16)\n U4 = tf.concat([U4, A2],3) # (.,128,128,32)\n Z17 = self.conv2d(U4, W9_1) + b9_1\n A17 = self.activation(self.batch_norm_layer(Z17)) # (.,128,128,16)\n A17_drop = self.dropout_layer(A17)\n W9_2 = self.weight_variable([3,3,16,16], \"W9_2\")\n b9_2 = self.bias_variable([16], 'b9_2')\n Z18 = self.conv2d(A17_drop, W9_2) + b9_2\n A18 = self.activation(self.batch_norm_layer(Z18)) # (.,128,128,16)\n # 10. 
unit: output layer\n with tf.name_scope('10.unit'):\n W10 = self.weight_variable([1,1,16,1], \"W10\")\n b10 = self.bias_variable([1], 'b10')\n Z19 = self.conv2d(A18, W10) + b10\n A19 = tf.nn.sigmoid(self.batch_norm_layer(Z19)) # (.,128,128,1)\n \n self.z_pred_tf = tf.identity(Z19, name='z_pred_tf') # (.,128,128,1)\n self.y_pred_tf = tf.identity(A19, name='y_pred_tf') # (.,128,128,1)\n \n print('Build UNet Graph: 10 layers, {} trainable weights'.format(\n self.num_of_weights([W1_1,b1_1,W1_2,b1_2,W2_1,b2_1,W2_2,b2_2,\n W3_1,b3_1,W3_2,b3_2,W4_1,b4_1,W4_2,b4_2,\n W5_1,b5_1,W5_2,b5_2,W6_1,b6_1,W6_2,b6_2,\n W7_1,b7_1,W7_2,b7_2,W8_1,b8_1,W8_2,b8_2,\n W9_1,b9_1,W9_2,b9_2,W10,b10])))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
it computes the error vector between desired and obtained output, stored at the last layer
def error(input_, output): global number_of_neurons_by_layer if len(output) != number_of_neurons_by_layer[-1]: raise IndexError( f"\033[91mDesired output length is incorrect. It must be {number_of_neurons_by_layer[-1]}.\033[m") output = np.array(output).reshape(len(output), 1) flow(input_) layers[-1]["error"] = output - layers[-1]["v"]
[ "def output_error(dblActivation,dblTarget):\n return(dblTarget - dblActivation)", "def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error", "def get_error(self, output,target):\n return [target[i]-output[i] for i in range(len(output))]", "def get_error_deriv(self,target):\n return self.final_layer.get_error_deriv(target)", "def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)", "def calculate_error(self, actual, predicted):\n\t\treturn actual - predicted", "def error_feature():\n return self.dataX @ (self.dataY - self.hypothesis(self.dataX))", "def input_error(self, out_influence, new_weights):\n in_influence = np.dot(np.transpose(new_weights), out_influence)\n return in_influence", "def ErrorFunction(p,x,y,z):\n \n return TargetFunction(p,x,y) - z", "def computeError(self):\n num_correct = 0\n total = len(self.targets) * len(self.targets[0])\n err = 0\n for index in range(len(self.inputs)):\n desired_output = self.targets[index]\n actual_output = self.propagate(self.inputs[index])\n for i in range(len(desired_output)):\n off_by = abs(actual_output[i] - desired_output[i])\n if off_by <= self.tolerance:\n num_correct += 1\n err += (desired_output[i] - actual_output[i])**2\n score = (num_correct*100)/total\n # print(num_correct, total, score, err)\n return (num_correct, total, score, err)", "def backPropagate(self):\n # Propagates error from output layer into the last hidden layer and \n # adjusts output weights\n outWeightsGradient = self.activationValues[-1].T @ self.error\n outBiasGradient = np.sum(self.error,axis=0)\n if self.lmbd > 0.0:\n outWeightsGradient += self.lmbd * self.outWeights\n self.outWeights -= next(self.eta) * outWeightsGradient\n self.outBias -= next(self.eta) * outBiasGradient\n\n\n errorHidden = self.error @ self.outWeights.T * self.activationFunction(self.activationValues[-1], self.alpha, derivative=True)\n for layer in range(self.hiddenLN-1, 0, -1):\n # The output layer is not modelled under self.weightsHidden, so \n # there is a special case for the last layer\n if layer != self.hiddenLN - 1:\n\n errorHidden = errorHidden @ self.weightsHidden[layer].T \n errorHidden*= self.activationFunction(self.activationValues[layer], self.alpha, derivative=True)\n\n # hiddenWeightGradient = next(self.eta) * errorHidden.T @ self.activationValues[layer-1]/self.Ninputs\n hiddenWeightGradient = next(self.eta) * self.activationValues[layer-1].T @ errorHidden/self.Ninputs\n hiddenBiasGradient = next(self.eta) * np.sum(errorHidden, axis=0)/self.Ninputs\n\n if self.lmbd > 0.0:\n hiddenWeightGradient += self.lmbd * self.weightsHidden[layer-1]\n\n self.weightsHidden[layer-1] -= hiddenWeightGradient\n self.biasHidden[layer] -= hiddenBiasGradient\n\n # Propagates error from second layer into the first hidden layer and \n # adjusts input weights\n inputWeightsGradient = self.X.T @ errorHidden\n inputBiasGradient = np.sum(errorHidden, axis=0)\n if self.lmbd > 0.0:\n inputWeightsGradient += self.lmbd * self.inputWeights\n self.inputWeights -= next(self.eta) * inputWeightsGradient/self.Ninputs\n self.inputBias -= next(self.eta) * inputBiasGradient/self.Ninputs", "def CalculateErrorBetweenLayers(self, currentHiddenLayer: NeuronLayer, previousLayer: NeuronLayer, nextLayer: NeuronLayer):\n\n # for every neuron in the current hidden layer\n for currentNeuronIndex in 
range(len(currentHiddenLayer.neurons)):\n sumOfErrors = 0.0\n currentNeuron = currentHiddenLayer.neurons[currentNeuronIndex]\n\n for previousNeuronIndex in range(len(previousLayer.neurons)):\n previousNeuron = previousLayer.neurons[previousNeuronIndex]\n if not previousNeuron.isPresetNeuron:\n sumOfErrors += previousNeuron.weights[currentNeuronIndex] * \\\n previousNeuron.error\n\n previousNeuron.weights[currentNeuronIndex] += self.learnRate * \\\n currentNeuron.output * previousNeuron.error\n\n currentNeuron.error = (\n 1-(math.tanh(currentNeuron.output))) * sumOfErrors\n # currentNeuron.error = derivative_sigmoid(\n # currentNeuron.output) * sumOfErrors\n\n if nextLayer.isInputLayer:\n for weightIndex in range(len(nextLayer.neurons)):\n if not currentNeuron.isPresetNeuron:\n currentNeuron.weights[weightIndex] += self.learnRate * \\\n nextLayer.neurons[weightIndex].output * \\\n currentNeuron.error", "def _calculate_final_error(self) -> float:\n\n error = np.sum(abs(self.T_x0 - self.T_data))/len(self.t[1:])\n return round(error, 3)", "def networkErrorCorrection(self):\n self.resultErrorCorrection()\n for layerNumber in range(len(self.networkList)-3,-1,-2):\n weightCounter = 0\n if layerNumber >= 0:\n for inputLayer in self.networkList[layerNumber]:\n inputLayer[0] = 0\n for errorLayer in self.networkList[layerNumber+2]:\n inputLayer[0] += self.networkList[layerNumber+1][weightCounter]*errorLayer[0]\n self.networkList[layerNumber+1][weightCounter] +=round(errorLayer[0]*self.alpha*inputLayer[1],4)\n weightCounter += 1\n inputLayer[0] = (inputLayer[1])*(1-inputLayer[1])*inputLayer[0] \n inputLayer[1] = 0", "def calc_error(self):\n if self._fit_data.y is not None and self._fit_data.y_fit is not None:\n self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit", "def calculate_global_error(self,\n target: [float]) -> float:\n assert(len(target) == len(self.layer[-1]))\n global_error = 0\n last_layer = self.layer[-1]\n for i in range(len(last_layer)):\n global_error += last_layer[i].calculate_error(target[i])\n return global_error", "def backward(self, error):\n layer = self.layers-2 # Start on the first hidden layer before output neurons\n\n # setup up the derivative of layer sums for delta calculation below\n # self.layer_sums = [[self.act[layer].derivative(val) for val in layer] for layer in self.layer_sums]\n\n while layer >= 0:\n self.layer_sums[layer] = [self.act[layer].derivative(val) for val in self.layer_sums[layer]]\n\n # Multiply error by sum to get change needed in nodes\n delta = [[a*b for a,b in zip(self.layer_sums[layer], error)]]\n\n # Check for a layer delta containing all zeros, meaning network has a dead layer and will not update\n chk = False\n for j in error:\n if j != 0:\n chk = True\n break\n if chk:\n for v in delta[0]:\n if v != 0:\n chk = False\n break\n if chk:\n raise Exception(\"DELTA ERROR: A layer in the network has a delta of 0. Network layer has died. 
\")\n\n # Get weights for actual neurons not including bias\n if self.__bias:\n layer_weights = []\n for neuron in self.weights[layer]:\n layer_weights.append(neuron[:-1])\n else:\n layer_weights = self.weights[layer]\n\n # Transmit error over weights to previous layer\n output = multiply_2d(delta, layer_weights)\n\n # set error for next layer\n error = output[0]\n\n # calculated needed adjustments\n adjustment = multiply_constant(self.__learning_constant, multiply_2d(transpose_2d(delta), [self.layer_inputs[layer]]))\n\n # Adjust weights for this layer\n self.weights[layer] = add_2d(adjustment, self.weights[layer])\n\n layer -= 1", "def _compute_prediction_error(prediction, target):\n error = prediction - target\n return error, np.linalg.norm(error, ord=2)", "def error(self, x,y):\n\t\treturn T.mean(T.neq(T.argmax(x, axis=1),T.argmax(y, axis=1)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
it gets the list of weigths
def getweigths(): ls = [] for i_lay in range(1, len(layers)): ls.append(layers[i_lay]["weigths"]) return ls
[ "def _weichall():\n try:\n LAchall=LOSC('a').rchall();LBchall=LOSC('b').rchall();L2chall=LOSC('2').rchall();\n allwvfm=[*LAchall[:2],*LBchall,*L2chall];\n allenergy=[*EMeters.EG1wYFE1in(),*EMeters.EG1w2in()[0],*EMeters.EG()[0][0]]\n allweich=[]\n for ii in range(10):\n templistQ=allwvfm[ii]\n bkgrdbuffer=int(0.038*len(templistQ))\n bkgrdQ=np.mean(templistQ[:bkgrdbuffer])\n ensampQ=allenergy[ii]\n weightQ=ensampQ/np.sum(np.array(templistQ)-bkgrdQ)\n allweich.append(np.array(weightQ*(np.array(templistQ)-bkgrdQ)))\n return allweich\n except:\n print('Weighted waveform generation failed!')\n return False", "def TW_display():\n a = twsew().weight\n return a", "def get_weights(self):", "def designWavelengths(self):\n\n wavelengths = self.value(\"WAVM\", index=1)\n if isinstance(wavelengths, list):\n while float(wavelengths[-1]) == 0.55:\n wavelengths.pop()\n return [ float(x) for x in wavelengths]\n else:\n return [float(wavelengths)]", "def init_wge(ratios, smoothing):\n wge = []\n # Compile the @@BORING matching regex\n re_band = re.compile(r'^@@BORING__0{1,7}')\n\n for token in ratios.keys():\n # Init the WGE\n ratios[token]['WGE'] = ratios[token]['hconstant'] * \\\n math.log(smoothing / 1)\n # Ignore unwanted words\n if token == '@@DUBIOUS' \\\n or token == '@@USELESS' \\\n or token == '@@IMPOSSIBLE' \\\n or token.startswith('@@-') \\\n or re_band.match(token) is not None:\n continue\n # Add the WGE to teh list\n wge.append((ratios[token]['WGE'], token, ratios[token]['hconstant']))\n # And sort it, of course\n wge = sorted(wge, key=lambda x: x[0])\n\n return wge", "def get_weights(self):\n return [self.w, self.b]", "def get_weight_list(self) -> List[float]:\n return self._weight_list", "def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][3]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][3]\n return list[itemId]", "def get_Delta_weigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"Delta_w\"])\n return ls", "def _get_waveform_table(self, awg_nr: int) -> list:\n ch = awg_nr*2\n wf_table = []\n if self.cases() is not None:\n for case in self.cases():\n wf_table.append((zibase.gen_waveform_name(ch, case),\n zibase.gen_waveform_name(ch+1, case)))\n return wf_table", "def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][1]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][1]\n return list[itemId]", "def get_weights():\n coverage_weight = 0.7\n ease_weight = 2.0\n length_weight = 0.4\n names = (\n 'coverage_weight', 'ease_of_observability_weight', 'length_weight',\n )\n for parameter in Parameter.objects.filter(name__in=names):\n if parameter.name == 'coverage_weight':\n coverage_weight = parameter.value\n elif parameter.name == 'ease_of_observability_weight':\n ease_weight = parameter.value\n elif parameter.name == 'length_weight':\n length_weight = parameter.value\n return coverage_weight, ease_weight, length_weight", "def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / diameter\n return fwhm", "def parse_weights(ln_list):\n weights = []\n for ln in ln_list:\n weights.append(ln.split()[2])\n return weights", "def get_weights(self):\n pass", "def wls(self):\n return self.wavelengths", "def _get_waveform_table(self, awg_nr: int) -> list:\n ch = awg_nr*2\n wf_table = []\n if 'flux' in self.cfg_codeword_protocol():\n for cw_r in range(8):\n for cw_l in range(8):\n 
wf_table.append((zibase.gen_waveform_name(ch, cw_l),\n zibase.gen_waveform_name(ch+1, cw_r)))\n else:\n for dio_cw in range(self._num_codewords):\n wf_table.append((zibase.gen_waveform_name(ch, dio_cw),\n zibase.gen_waveform_name(ch+1, dio_cw)))\n return wf_table", "def _get_wl_data(word_list):\n\n words = [word for word in word_list if word.isalpha]\n num_words = len(words)\n lengths = sorted([len(word) for word in words])\n\n wl_range = max(lengths) - min(lengths)\n wl_mean = sum(lengths) / num_words\n wl_median = Metrics._get_median(lengths)\n wl_mode = Metrics._get_mode(lengths)\n\n unique = set(word_list)\n lex_div = len(unique) / float(num_words)\n\n return {\"wl_mean\": wl_mean, \"wl_median\": wl_median, \"wl_mode\": wl_mode,\n \"wl_range\": wl_range, \"pl_words\": num_words, \"lex_div\": lex_div}", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
it gets the list of "Delta_w"
def get_Delta_weigths(): ls = [] for i_lay in range(1, len(layers)): ls.append(layers[i_lay]["Delta_w"]) return ls
[ "def _get_delta_units(self) -> List[str]:\n return [u for u in self._units if u.startswith(\"delta_\")]", "def calc_dDelta(self):\n self.dDelta = np.zeros((self.nphon, self.nwann, self.nwann))\n #iq, iv, iR\n iR0 = iRlist[(0, 0, 0)]\n iq0 = iqlist[(0, 0, 0)]\n\n for iv in range(self.nphon):\n for ik, k in enumerate(self.klist):\n self.dDelta[iv] += self.kweight[ik] * (\n self.EPCmat_wann_up[iq0, iv, ik, :, :] -\n self.EPCmat_wann_dn[iq0, iv, ik, :, :])", "def deltas(x, w=9):\n _, cols = x.shape\n hlen = np.floor(w / 2)\n win = np.arange(hlen, -(hlen + 1), -1, dtype='float32')\n\n xx = np.append(np.append(np.tile(x[:, 0], (int(hlen), 1)).T, x, axis=1),\n np.tile(x[:, cols - 1], (int(hlen), 1)).T,\n axis=1)\n from scipy.signal import lfilter\n deltas = lfilter(win, 1, xx, axis=1)[:, int(2 * hlen):int(2 * hlen + cols)]\n return deltas", "def getweigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"weigths\"])\n return ls", "def get_w_lims(self):\n minx, maxx = self.get_xlim3d()\n miny, maxy = self.get_ylim3d()\n minz, maxz = self.get_zlim3d()\n return minx, maxx, miny, maxy, minz, maxz", "def new_w_vector():\n return [1] * 201", "def calculate_delta_weights(self, out_tensors: List[Tensor], in_tensors: List[Tensor]) -> List[Tensor]:\n return None", "def getOscDelta():\n tmp_channels = GetAllSelCh(True)\n delta_store = {}\n for ch in tmp_channels:\n if isTubeChannel(ch) and GetOscType(ch):\n name = GetChName(ch).lower()\n color_name = getChannelColor(name)\n osc_chase = GetOscChase(ch)\n\n if osc_chase and color_name not in delta_store:\n for x in range(ch + 1, 512):\n if isTubeChannel(x) and GetOscType(x):\n namex = GetChName(x).lower()\n color_name = getChannelColor(namex)\n osc_chasex = GetOscChase(x)\n chase_value = abs(osc_chase - osc_chasex)\n delta_store[color_name] = chase_value\n if \"first\" not in delta_store:\n delta_store[\"first\"] = chase_value\n break\n\n if len(delta_store) == 4:\n break\n return delta_store", "def get_transitions(self, w):\n return np.array([((i, j), self.dij(j, i), np.abs(e1 - e2), 0)\n for j, e1 in enumerate(self.ev)\n for i, e2 in enumerate(self.ev)\n if np.isclose(e1 - e2, w)], dtype=DTYPE_JUMP)", "def extract_wind(source,la,lo,lats,lons,wd,ws):\r\n lat = source[la]\r\n lon = source[lo]\r\n wdir = []\r\n wspd = [] \r\n for coor in zip(lon,lat): \r\n in_lon = coor[0]\r\n in_lat = coor[1]\r\n # since lons are 0 thru 360, convert to -180 thru 180\r\n converted_lons = lons - ( lons.astype(np.int32) / 180) * 360\r\n # get cell of facility\r\n lat_idx = geo_idx(in_lat, lats)\r\n lon_idx = geo_idx(in_lon, converted_lons)\r\n #extract winddirection and wind speed from that cell\r\n d = wd[:,lat_idx,lon_idx][0]\r\n wdir.append(d)\r\n s = ws[:,lat_idx,lon_idx][0]\r\n wspd.append(s)\r\n \r\n return wdir,wspd", "def _get_vector_defining_possible_idling_direction(w_star: WorkloadSpace,\n w: WorkloadSpace) -> WorkloadSpace:\n return w_star - w", "def energia(s,w):\n \n Energy = []\n #for over the signal in steps of len(w)\n #for n in range(0,len(s)-len(w),len(w)):\n for n in range(0,len(s)-len(w)):\n \n #print(n,':',n+len(w))\n #print(len(s))\n trama = s[n:n+len(w)] * w #actual windowed segment\n \n Energy.append(np.sum(trama**2))\n \n return np.array(Energy)", "def get_dt_wadati(self,absolute=False):\n \n dtp=[]\n dts=[]\n min_delta=[]\n \n for event in self.events:\n \n un_station=[x.station for x in event.phases]\n un_station=list(set(un_station))\n un_station.sort()\n \n\n tpp=[]\n tss=[]\n \n \n for station in un_station:\n 
phase_P=event.get_phase(station,1)\n phase_S=event.get_phase(station,2)\n \n if (phase_P is not None) and (phase_S is not None):\n tpp.append(phase_P.t_obs)\n tss.append(phase_S.t_obs)\n \n else:\n continue\n \n #### Do the combinations\n ind_combi=list(itertools.combinations(list(range(len(tpp))), 2))\n \n for index in ind_combi:\n dpp=tpp[index[0]]-tpp[index[1]]\n dss=tss[index[0]]-tss[index[1]]\n delta_1=tss[index[0]]-tpp[index[0]]\n delta_2=tss[index[1]]-tpp[index[1]]\n delta=np.min([delta_1,delta_2])\n dtp.append(dpp)\n dts.append(dss)\n min_delta.append(delta)\n \n \n dtp=np.array(dtp)\n dts=np.array(dts)\n if absolute:\n dtp=np.abs(dtp)\n dts=np.abs(dts)\n min_delta=np.array(min_delta)\n \n return dtp,dts,min_delta", "def extract_delta_Q_skewness(batch,index,start_cycle,end_cycle):\n from scipy.stats import skew\n X= []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n Qd_100 = batch[cell_no]['cycles'][str(end_cycle-1)]['Qdlin']\n Qd_10 = batch[cell_no]['cycles'][str(start_cycle-1)]['Qdlin']\n delta = Qd_100-Qd_10\n # delta_rv_mean = delta - np.average(delta)\n # temp = np.average(np.power(delta_rv_mean,3)) / np.power(np.sum(np.power(delta_rv_mean,2)),1.5)\n # Note: Supplementary formular is wrong\n temp = skew(delta)\n skewness = log(abs(temp),10)\n X.append(skewness)\n X = np.reshape(X,(-1,1))\n return X\n pass", "def calc_walked_distance(self, window_size=0):\n walked_distance = []\n walked_distance_window = []\n for i in range(len(self)):\n vel = self.get_absolute_velocity(i)\n if vel is None: vel = 0\n walked_distance.append(\n vel + (walked_distance[i-1] if i>0 else 0)\n )\n walked_distance_window.append(\n walked_distance[i] - (walked_distance[i-window_size] if i>window_size else 0)\n )\n return walked_distance, walked_distance_window", "def update_W(self):\n self.sigma_W = [\n np.linalg.inv(self.E_tau(m) * self.E_ZZ() + np.diag(self.alpha[m]))\n for m in range(self.groups)]\n self.m_W = [self.E_tau(m) * self.sigma_W[m] @ self.E_Z() @ self.X[m].T\n for m in range(self.groups)]", "def wls(self):\n return self.wavelengths", "def get_flujos_derivado(self):\n return self.flujos_derivados", "def get_weights(self):\n return [self.w, self.b]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets/clears a software breakpoint address > the address of the software breakpoint instruction > the instruction to be programmed (either the software breakpoint opcode or the original instruction the software breakpoint was replacing). flags > One or more of the SWBPFlags listed below. Returns the original/old opcode at address
def set_sw_bp(address, instruction, flags): log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % ( address, instruction, flags)) # Accept addressing both from FLASH_START and from 0x0 addr = address & (FLASH_START-1) single_page_access = False buffer_size = PAGE_SIZE * 16 # Canopus: single page read-modify-write is possible within the first 16kb of flash. # SAMRH71: single page read-modify-write is possible in whole flash. if addr < 16384 or "RH71" in device: buffer_size = PAGE_SIZE single_page_access = True buffer_mask = long(buffer_size-1) data_buffer = bytearray(buffer_size) # Get the start address to the flash page(es) we need to erase start_addr = addr & ~(buffer_mask) absolute_start_addr = address & ~(buffer_mask) # Get BP address within the buffer bp_addr = addr & buffer_mask prog_read("pgm", absolute_start_addr, buffer_size, data_buffer) org_inst = 0 n = 0 # Replace instruction in data_buffer while(n < 2): org_inst += data_buffer[bp_addr+n] << (n*8) data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff) n = n+1 if single_page_access: if "RH71" in device: # Remove flash offset, if any, and mask away page internal address bits. # FARG bitfield in EFC_FCR page_number = addr & 0x3fff00 # SAMRH71 has page_size 256 # Erase and write page (two separate commands on SAMRH71) dev.Write32(efc_fcr, efc_cmd_ep | page_number) waitForFlashReady() dev.Write(start_addr, data_buffer, 0, PAGE_SIZE) dev.Write32(efc_fcr, efc_cmd_wp | page_number) waitForFlashReady() else: dev.Write(start_addr, data_buffer, 0, PAGE_SIZE) # Remove flash offset, if any, and mask away page internal address bits. # Then shift right once to position page_number in the FARG bitfield in EFC_FCR page_number = (addr & 0x3ffe00)/2 # Canopus has page_size 512 # Erase and write page (one single command on Canopus) dev.Write32(efc_fcr, efc_cmd_ewp | page_number) waitForFlashReady() else: # Erase 16 pages (16pages == buffer_size). The "0x200" sets the number of pages to erase. dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200) waitForFlashReady() prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer) return org_inst
[ "def set_sw_bp(address, instruction, flags):\n log.log(LEVEL_INFO, \"Debug:: set bp at address 0x%0x, store instructions 0x%0x, flags = 0x%0x\" % (address, instruction, flags))", "def bp(self, expr=None, handler=None, windbgcmd=None, oneshot=False,\n passcount=None, threadid=None, count=0xffffffff):\n if expr is None:\n expr = self.reg.get_pc()\n #if threadid is None:\n # threadid = self._systems.GetCurrentThreadId()\n if handler:\n handler = util.bp_wrap(self, handler)\n return self.breakpoints.set(expr,\n handler, \n DbgEng.DEBUG_BREAKPOINT_CODE,\n windbgcmd,\n oneshot,\n passcount,\n threadid,\n count=count)", "def debugger_add_sw_breakpoint():", "def shift_breakpoint(self):\r\n self.current_breakpoint = self.next_breakpoint\r\n if self.breakpoints:\r\n self.next_breakpoint = self.breakpoints.pop(0)\r\n else:\r\n self.next_breakpoint = None", "def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False):\n if flash and not ram:\n flags = enums.JLinkBreakpoint.SW_FLASH\n elif not flash and ram:\n flags = enums.JLinkBreakpoint.SW_RAM\n else:\n flags = enums.JLinkBreakpoint.SW\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Software breakpoint could not be set.')\n\n return handle", "def debugger_clear_breakpoint():", "def test_06_changeBreakpointState(self):\n self.ikpdb.set_breakpoint('debugged_programs/test02_breakpoints.py',\n line_number=7,\n enabled=True)\n self.ikpdb.set_breakpoint('debugged_programs/test02_breakpoints.py',\n line_number=8,\n condition='a_var==1',\n enabled=True)\n self.ikpdb.set_breakpoint('debugged_programs/test02_breakpoints.py',\n line_number=10,\n condition='a_var==60',\n enabled=True)\n\n self.ikpdb.run_script()\n\n # break at line 7\n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['command'], \"programBreak\", \"programBreak message not received.\")\n self.assertEqual(i_msg['exception'], None, \"Unexpected exception raised.\")\n top_frame = i_msg['frames'][0]\n self.assertEqual(top_frame['file_path'], \n 'debugged_programs/test02_breakpoints.py', \n \"programBreak on unexpected file.\")\n self.assertEqual(top_frame['line_number'], 7, \"programBreak on unexpected line number.\")\n \n # disable breakpoint at line 8 (second)\n msg_id = self.ikpdb.send('changeBreakpointState', \n breakpoint_number=1,\n enabled=False)\n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['_id'], msg_id, \"Unexpected reply to 'clearBreakpoint'.\")\n self.assertEqual(i_msg['commandExecStatus'], \"ok\", \"'changeBreakpoint' command failed.\")\n \n # modify condition on last breakpoint so that it will trigger\n msg_id = self.ikpdb.send('changeBreakpointState', \n breakpoint_number=2,\n enabled=True,\n condition=\"a_var==50\")\n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['_id'], msg_id, \"Unexpected reply to 'clearBreakpoint'.\")\n self.assertEqual(i_msg['commandExecStatus'], \"ok\", \"'changeBreakpoint' command failed.\")\n\n bp_list = self.ikpdb.get_breakpoints()\n\n # resume()\n msg_id = self.ikpdb.send('resume')\n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['_id'], msg_id, \"Unexpected reply to 'clearBreakpoint'.\")\n self.assertEqual(i_msg['commandExecStatus'], \"ok\", \"'resume' command failed.\")\n self.assertEqual(i_msg['result'].get('executionStatus'), 'running', \"'resume' command failed.\")\n\n # check that last breakpoint triggers\n i_msg = 
self.ikpdb.receive()\n self.assertEqual(i_msg['command'], \"programBreak\", \"programBreak message not received.\")\n self.assertEqual(i_msg['exception'], None, \"Unexpected exception raised.\")\n top_frame = i_msg['frames'][0]\n self.assertEqual(top_frame['file_path'], \n 'debugged_programs/test02_breakpoints.py', \n \"programBreak on unexpected file.\")\n self.assertEqual(top_frame['line_number'], 10, \"programBreak on unexpected line number.\")", "def debugger_disable_breakpoint():", "def breakpoint(self):\n\t\treturn self.expr(core.LLIL_BP)", "def debugger_add_hw_breakpoint():", "def remove_breakpoint(self, bp: Breakpoint) -> None:\n for hwbp in self.hw_breakpoints:\n if hwbp.enabled and hwbp.addr == bp.addr:\n hwbp.enabled = False\n self.ap.write_memory(hwbp.comp_register_addr, 0)\n self.num_hw_breakpoint_used -= 1\n return", "def breakpoint_find(self, addr):\n return self._dll.JLINKARM_FindBP(addr)", "def debugger_enable_breakpoint():", "def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()", "def set_breakpoint(context, *args):\n\n vars = [arg for arg in locals()['args']] # noqa F841\n\n if settings.DEBUG:\n breakpoint()", "def debugger_disable_all_breakpoints():", "def breakpoint_set(self, addr, thumb=False, arm=False):\n flags = enums.JLinkBreakpoint.ANY\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Breakpoint could not be set.')\n\n return handle", "def bl(self):\n # Make a copy so we can remove stale if needed\n ids = [bpid for bpid in self.breakpoints]\n for bpid in ids:\n try:\n bp = self._control.GetBreakpointById(bpid)\n except exception.E_NOINTERFACE_Error:\n self.breakpoints._remove_stale(bpid)\n continue\n\n if bp.GetFlags() & DbgEng.DEBUG_BREAKPOINT_ENABLED:\n status = 'e'\n else:\n status = 'd'\n if bp.GetFlags() & DbgEng.DEBUG_BREAKPOINT_DEFERRED:\n offset = \"[Deferred]\"\n expr = bp.GetOffsetExpression()\n else:\n offset = \"%016x\" % bp.GetOffset()\n expr = self.get_name_by_offset(bp.GetOffset())\n try:\n tid = bp.GetMatchThreadId()\n tid = \"%04x\" % tid\n except exception.E_NOINTERFACE_Error:\n tid = \"****\"\n\n if bp.GetType()[0] == DbgEng.DEBUG_BREAKPOINT_DATA:\n width, prot = bp.GetDataParameters()\n width = str(width)\n prot = {4: 'e', 2: 'w', 1: 'r'}[prot] \n else:\n width = ' '\n prot = ' '\n print(\"%d %s %16s %s %s %04d %04d 0:%s %s\" % (\n bp.GetId(), status, offset, prot, width,\n bp.GetCurrentPassCount(), bp.GetPassCount(),\n tid, expr))", "def hardware_breakpoint_set(self, addr, thumb=False, arm=False):\n flags = enums.JLinkBreakpoint.HW\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Hardware breakpoint could not be set.')\n\n return handle" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if pen is down, False if it's up.
def isdown(self): return self.pen == 1
[ "def is_pen_down():\n return self.turtle.pen_down", "def isUp ( self ) :\n return not self.isDown()", "def isDown ( self ) :\n return self._ilhcbmagnet.isDown()", "def down(self):\n if (self.down_flag):\n self.down_flag = False\n return True\n else:\n return False", "def drawmode(self):\n return self._turtle.isdown()", "def is_down(self):\n return self.state is None or\\\n self.state.internal_state == State.DOWN", "def up(self):\n if (self.up_flag):\n self.up_flag = False\n return True\n else:\n return False", "def is_pressed(self) -> bool:\n return True", "def is_face_up( self ):\n return self.__face_up", "def IsByPen(self) -> bool:", "def isButtonReleased() -> bool:\n pass", "def _isoff(self):\n return self.dp.state()==PyTango.DevState.OFF", "def pressed(self) -> bool:\n return self.type == \"JOYBUTTONDOWN\"", "def pen_down(self):\n self.pen = True", "def _is_button_pressed(self):\n return self.data.sensordata[0] > 0", "def is_holding_key_down(key: CommonKey) -> bool:\n return _is_holding_key_down(key)", "def pen_down(pen_down=True):\n self.turtle.pen_down = pen_down\n self.send_report()", "def double_pressed(self):\n return self.pressed and self.last_press < 0.1", "def ButtonIsDown(*args, **kwargs):\n return _core_.MouseState_ButtonIsDown(*args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the bearing (angle) of the turtle.
def setbearing(self, bearing): diff = self.bearing - bearing self.b_change = diff self.bearing = bearing self._add_point() self.b_change = 0
[ "def change_angle(self, value):\n if self.mode != Modes.Move:\n return\n\n #Get the current tank\n current_tank = self.cur_team\n\n #Erase it\n self.erase_tank(current_tank)\n\n #Change the angle accordingly\n if value == \"up\":\n current_tank.change_barrel_angle(1)\n\n else:\n current_tank.change_barrel_angle(-1)\n \n #Redraw the tank\n self.draw_tank(current_tank)", "def setBearing(self, bearing):\n diff = self.bearing - bearing\n self.b_change = diff\n self.bearing = bearing\n self._add_point()\n self.b_change = 0", "def bearing(self, value: int):\n self._bearing = value", "def change_angle(self, new_angle):\r\n self.angle = new_angle", "def change_angle(self, up_or_down):\n self.angle += up_or_down * math.pi / 180", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def setAngle(self,a):\n self.angle = a\n if self.drawn == True:\n self.draw()", "def set_angle(self, value):\n if not -90 <= value <= 90:\n raise ValueError('Servo angle must be between -90 and 90 degrees')\n self.duty_cycle = ...", "def setAngle(self, angle):\n self.angle = angle", "def change_angle(self, increase: bool) -> None:\r\n if increase:\r\n self._angle += 10\r\n if self._angle == 360:\r\n self._angle = 0\r\n else:\r\n self._angle -= 10\r\n if self._angle == -10:\r\n self._angle = 350", "def setAngle(self, angle):\n self.vector.angle = angle", "def set_angle(self, angle):\n return self.bot_client.send_command(_Command.SetAngle, angle)", "def rotate_turtle(angle, mv_direction):\n \n if mv_direction == 1:\n turtle.right(angle)\n else:\n turtle.left(angle)", "def set_angel(self):\n self.angle = math.degrees(math.atan2(self.next.y - self.y, self.next.x - self.x)\n - math.atan2(self.prev.y - self.y, self.prev.x - self.x))\n\n if self.angle < 0:\n self.angle += 360", "def set_steering(angle):\n motors = _get_car_motors()\n motors.set_steering(angle)", "def angle(self, angle):\n self._angle = angle\n self.x_rate = self._rate * cos(self._angle)\n self.y_rate = self._rate * sin(self._angle)", "def set_angle(self, angle):\n\n # Check that the angle is in range\n assert(angle >= 0)\n assert(angle <= 180)\n\n # Do nothing if there is no change in angle from the last request\n if(self.__angle == angle):\n # Cause the servo to move to the previously set position\n self.__move()\n return\n else:\n self.__angle = angle\n\n # Calculate the duty cycle as a percentage\n # 50Hz => 20ms\n # 1ms => 0deg => 5%\n # 2ms => 180deg => 10%\n # duty = angle/180deg * (1ms * 50Hz / 1000) + (1ms * 50Hz / 1000)\n # duty = (1ms * 50Hz / 1000) * (1 + angle/180deg)\n duty = 5 * (1 + (angle / 180))\n\n # Set the desired position\n self.__pwm.set_duty(duty)\n\n # Cause the servo to move\n self.__move()", "def adjAngle(self, amt): \r\n\r\n self.angle = self.angle + radians(amt)\r\n self.redraw()", "def set_angle(self, angle_key: Union[EKT, str], v: float): # -> None:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this method is called by an admin user to approve the lyrics of a song
def approve_lyrics(): pass
[ "def is_lyrics_approved():", "async def lyrics(self, ctx: commands.Context, *, song_name: str):\n try:\n client = await self.obtain_client()\n except AttributeError:\n await ctx.send(\"Not key for KSoft.Si has been set, ask owner to add a key.\")\n return\n try:\n music_lyrics = await client.music.lyrics(song_name)\n except ksoftapi.NoResults:\n await ctx.send(\"No lyrics were found for your music.\")\n return\n message, available_musics = await self._title_choose(music_lyrics)\n await ctx.maybe_send_embed(message)\n predicator = MessagePredicate.less(10, ctx)\n try:\n user_message = await self.bot.wait_for(\"message\", check=predicator, timeout=60)\n except Te:\n await ctx.send(\"It's so silent on the outside...\")\n return\n\n choosen_music = user_message.content\n if choosen_music not in available_musics:\n await ctx.send(\n \"I was unable to find the corresponding music in the available music list.\"\n )\n return\n music = available_musics[choosen_music]\n embeds = []\n embed = discord.Embed(color=await ctx.embed_color(), title=music.name, description=None)\n embed.set_thumbnail(url=music.album_art)\n embed.set_footer(text=\"Powered by KSoft.Si.\", icon_url=ctx.author.avatar_url)\n for text in pagify(music.lyrics):\n embed.description = text\n embeds.append(embed)\n create_task(menu(ctx, embeds, DEFAULT_CONTROLS)) # No await since max_concurrency is here", "def approveAction(self):\r\n\r\n approvedAddedItems = self.tabs.currentWidget().children()[2].selectedItems()\r\n for item in approvedAddedItems:\r\n self.data.session.user.approveAddedRoot(item.text())\r\n self.tabs.widget(self.tabs.count() - 1).children()[2].addItem(item)\r\n\r\n approvedDeletedItems = self.tabs.currentWidget().children()[3].selectedItems()\r\n for item in approvedDeletedItems:\r\n self.data.session.user.approveDeletedLemma(item.text())\r\n self.tabs.widget(self.tabs.count() - 1).children()[3].addItem(item)\r\n\r\n # self.tabs.widget(self.tabs.count() - 1).update()\r\n # self.tabs.widget(self.tabs.count() - 1).update()\r", "async def lyrics(self, ctx, *args):\n state = self.get_state(ctx.guild)\n extract = Song_Lyrics(self.config[\"search_key\"], self.config[\"search_id\"])\n messages = []\n title = None\n lyrics = None\n if len(args) == 0: # now playing lyrics\n if ctx.voice_client is not None and ctx.voice_client.is_playing():\n playing = state.now_playing\n title, lyrics = extract.get_lyrics(playing.title)\n print(len(lyrics))\n print(lyrics)\n\n else:\n await ctx.send(\"Nothing is playing currently, add a song title to the command to search\")\n return\n else: # search lyrics\n song = utils.argument_concat(args)\n if utils.url_validation(song):\n await ctx.send(\"This doesn't take urls fam, just enter the title of the song\")\n return\n title, lyrics = extract.get_lyrics(song)\n message = title + \"\\n\" + lyrics\n if len(message) > 2000:\n while len(message) > 2000:\n index = 2000\n while message[index] != \"\\n\":\n index -= 1\n mes = message[:index]\n sage = message[index:]\n messages.append(mes)\n message = sage\n else:\n messages.append(message)\n for string in messages:\n await ctx.send(string)", "def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n 
approval.save ()\n self.editValues (event.answer, response)", "def moderate_or_publish(self):\n if self.needs_moderation():#moderate\n self.place_on_moderation_queue()\n else:#auto-approve\n from askbot.models import signals\n signals.post_revision_published.send(None, revision = self)", "def vote(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n _logger.info(\"%s is trying to vote on %s\", request.user, context['song'])\n vote_dict = get_vote_dict(request.user)\n can_vote = context['song'].id not in vote_dict[request.user.id] and context['song'].ready\n if can_vote:\n vote = Vote()\n vote.user = request.user\n vote.song = context['song']\n vote.save()\n vote_dict[request.user.id].append(context['song'].id)\n cache.set('vote_dict', vote_dict)\n logging.info('%s voted on %s.', request.user, context['song'])\n return HttpResponse('Vote registered on %s.' % context['song'])\n else:\n logging.info('%s tried to vote more than once on %s.', request.user.username, context['song'])\n return HttpResponse(\"Du har allerede stemt på denne sangen i dag!\", content_type='text/plain', status=403)", "def change_learned_status(self, instance):\n self.song = self.songs.get_song_by_title(instance.text)\n # Marks song as learned and shows according status text\n if self.song.required:\n self.song.mark_learned()\n status_text = \"You have learned {}\".format(self.song.title)\n # Marks song as required and shows according status text\n else:\n self.song.mark_required()\n status_text = \"You need to learn {}\".format(self.song.title)\n # Shows status text, sorts songs by current s\n self.root.ids.status_text.text = status_text\n self.sort_songs(self.root.ids.sort_options.text)", "def lyricsCmd(bot, update,args):\n print ('I\\'m here')\n searchName = ' '.join(args)\n update.message.text=searchName\n print (update.message.text)\n lyrics(bot,update)", "async def music_role(self, ctx):\n if ctx.invoked_subcommand is None:\n await self.bot.say('Incorrect music role. 
Please use {0.prefix}help '\n 'music_role to see a list of roles.'.format(ctx))", "async def musicbot(self, ctx, the_state):\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod:\r\n if the_state == \"1\":\r\n self.music_off = False\r\n await ctx.send(\"Music Bot features now on\")\r\n else:\r\n self.music_off = True\r\n await ctx.send(\"Music Bot features now off\")\r\n else:\r\n await ctx.send(\"**Error:** You are not allowed to use this command!\")", "def help_lyrics(self):\n print_say(\"finds lyrics\\n\", self)\n print_say(\"the format is song,artist\\n\", self)\n print_say(\"song and artist are separated by a - \\n\", self)\n print_say(\"-- Example:\", self)\n print_say(\"\\tlyrics wonderful tonight-eric clapton\", self)", "def update_text(self):\n likes = \"\"\n if self.comedy.get():\n likes += \"You like comedy.\"\n if self.drama.get():\n likes += \"You like drama.\"\n if self.romance.get():\n likes += \"You like romantic.\"\n self.result.delete(0.0, END) # delete from position 0 until the end\n self.result.insert(0.0, likes) # insert to textbox the text in likes in position 0", "def test_can_edit_song(self):\n\n data = {\n 'title': 'new title'\n }\n\n # Can't edit song if unauthorized\n resp = self.client.put(self.songs[0]['url'], data)\n self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED) \n\n # Can edit own song while authorized\n token = self.get_token(self.users[0])\n resp = self.authenticated_request('put', self.songs[0]['url'], data, token)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n # Can't edit other user's song\n resp = self.authenticated_request('put', self.songs[1]['url'], data, token)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])", "async def author(self, ctx, *, text):\n author_id = ctx.message.author.id\n record = await Mongo.get_record('embed', 'embed_owner', author_id)\n if len(text) > 128:\n await ctx.send(\"Слишком много символов. 
Максимальное количество: 128\")\n else:\n upg = {\n \"author\":text\n }\n await Mongo.update_record('embed', record, upg)\n await ctx.send(\"Установка заголовка успешна\")", "async def vote(self, ctx):\n embed = discord.Embed(title = \"Here are some bot lists that you can vote for me on, voters may soon™ recieve perks\", color = discord.Color.blurple())\n embed.add_field(name = \"Bots For Discord\", value = \"[Click Here](https://botsfordiscord.com/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Discord Boats\", value = \"[Click Here](https://discord.boats/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Divine Discord Bots\", value = \"[Click Here](https://divinediscordbots.com/bot/592811241756688405/vote)\") \n embed.add_field(name = \"Botlist.space\", value = \"[Click Here](https://botlist.space/bot/592811241756688405/upvote)\") \n embed.set_thumbnail(url = self.bot.user.avatar_url)\n await ctx.send(embed = embed)", "def respond_to_username_mention(self, message):\n\n verses = find_verses(message.body)\n if verses is not None:\n response = Response(message, self.parser)\n for v in verses:\n book_name = books.get_book(v[0])\n if book_name is not None:\n v2 = verse.Verse(self.d, # Database\n book_name, # Book\n v[1], # Chapter\n v[3], # Translation\n message.author, # User\n message.permalink[24:message.permalink.find(\"/\", 24)], # Subreddit\n v[2]) # Verse\n if not response.is_duplicate_verse(v2):\n response.add_verse(v2)\n if len(response.verse_list) != 0:\n message_response = response.construct_message()\n if message_response is not None:\n self.log.info(\"Replying to %s with verse quotations...\" % message.author)\n try:\n message.reply(message_response)\n# self.d.update_db_stats(response.verse_list)\n# self.d.increment_comment_count()\n except praw.errors.Forbidden as err:\n # This message is unreachable.\n pass\n except praw.errors.APIException as err:\n if err.error_type in (\"TOO_OLD\", \"DELETED_LINK\", \"DELETED_COMMENT\"):\n self.log.warning(\"An error occurred while replying with error_type %s.\" % err.error_type)", "def approve(self):\n self.approved = True\n self.quest_node['approved'] = True\n graph.push(self.quest_node)\n self.payout()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method is called to check if a song already has lyrics, so as to avoid duplicate lyrics
def song_has_lyrics(): pass
[ "def add_lyrics(self):\n\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n c.execute(\"SELECT songs.id, artist, title, url FROM songs LEFT JOIN lyrics ON songs.id = lyrics.song_id WHERE lyrics.song_id IS NULL\")\n all_songs_to_scrape = c.fetchall()\n for song in all_songs_to_scrape:\n song_id = song[0]\n song_artist = song[1]\n song_title = song[2]\n song_url = song[3]\n print(\"Looking for lyrics for \" + song_title + \" by \" + song_artist)\n try:\n lyrics = pygenius_songs.searchURL(song_url, 'lyrics')\n for lyric in lyrics:\n for line in lyric.split('\\n'):\n c.execute('INSERT INTO lyrics(song_id, line) VALUES (?,?)', (song_id, line))\n conn.commit()\n except Exception as e:\n print(e)\n print song_url\n print(\"Exception caught! ... continuing.\")\n pass", "def lyrics_note_is_same_as_original():\n pass", "async def lyrics(self, ctx, *args):\n state = self.get_state(ctx.guild)\n extract = Song_Lyrics(self.config[\"search_key\"], self.config[\"search_id\"])\n messages = []\n title = None\n lyrics = None\n if len(args) == 0: # now playing lyrics\n if ctx.voice_client is not None and ctx.voice_client.is_playing():\n playing = state.now_playing\n title, lyrics = extract.get_lyrics(playing.title)\n print(len(lyrics))\n print(lyrics)\n\n else:\n await ctx.send(\"Nothing is playing currently, add a song title to the command to search\")\n return\n else: # search lyrics\n song = utils.argument_concat(args)\n if utils.url_validation(song):\n await ctx.send(\"This doesn't take urls fam, just enter the title of the song\")\n return\n title, lyrics = extract.get_lyrics(song)\n message = title + \"\\n\" + lyrics\n if len(message) > 2000:\n while len(message) > 2000:\n index = 2000\n while message[index] != \"\\n\":\n index -= 1\n mes = message[:index]\n sage = message[index:]\n messages.append(mes)\n message = sage\n else:\n messages.append(message)\n for string in messages:\n await ctx.send(string)", "def fetch_lyrics(self) -> None:\n if self.artist is None or self.title is None:\n return\n Logger.Logger.log('Looking for song lyrics...')\n finder = LyricsFinder.LyricsFinder(self)\n finder.fetch()\n self.lyrics = finder.get_lyrics()\n self.lyrics_writer = finder.get_lyrics_writer()\n if not self.lyrics:\n Logger.Logger.log('No lyrics found for this song.')", "def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def _has_song(self, title):\r\n for i in range(len(self._playlist)):\r\n song_name = self._playlist[i].split(':')[0]\r\n if song_name == title:\r\n return True\r\n return False", "def is_lyrics_approved():", "def song_already_exists(song, playlist_id):\n print('Song {title} already in playlist {playlist_id}, adding has been skipped.'\n .format(title=song.title,\n playlist_id=playlist_id))\n pass", "def fetch(self) -> None:\n lyrics: Tuple[Optional[str], Optional[str]] = (None, None)\n if self.query is not None:\n # If a search query for this song has been defined, search the lyrics using it.\n alternative: bool = False\n url: str = self.__search(False)\n if not url and self.minimal_query 
is not None and not Config.Config.get_strict_lyrics():\n # If no lyrics has been found using the normal search query, retry using a shorter and simpler one.\n Logger.Logger.log('No lyrics found using full song name, retrying using a shorter version...')\n alternative = True\n # Repeat the search telling the method to use the shorter query version.\n url = self.__search(True)\n if not url:\n Logger.Logger.log('No lyrics found in anyway.')\n if url:\n lyrics = AZLyrics.__load(url)\n if not lyrics and not alternative and not Config.Config.get_strict_lyrics():\n if self.minimal_query is not None:\n # Lyrics were found but its page was empty, retry searching it using the shorter query version.\n Logger.Logger.log('No lyrics found using full song name, retrying using a shorter version...')\n # Repeat the search telling the method to use the shorter query version.\n url = self.__search(True)\n if url:\n # Try again to fetch the song lyrics.\n lyrics = AZLyrics.__load(url)\n else:\n Logger.Logger.log('No lyrics found in anyway.')\n self.lyrics = lyrics[0]\n self.lyrics_writer = lyrics[1]", "def __add_lyric(self, song, genius_api):\n\t\tentry = {\n\t\t\t'song_id' : int(song['id']),\n\t\t\t'song_title' : song['title'],\n\t\t\t'url' : song['url']\n\t\t\t}\n\t\ttry:\n\t\t\tentry['lyrics'] = genius_api.get_lyrics(song['id'], song['url'])\n\t\texcept:\n\t\t\tentry['lyrics'] = ''\t\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\ttry:\n\t\t\tself.db.lyrics.insert_one(entry)\n\t\texcept errors.DuplicateKeyError:\n\t\t\tpass", "def __rh_song(self,row):\r\n return self.data[row].song != None", "def check_message(self, text=\"\"):\r\n lyrics_re = re.match(\"^!lyrics\\s(?P<name>[\\S\\s]+),\\s?(?P<artist>[\\S\\s]+)$\", text)\r\n\r\n if lyrics_re is not None:\r\n song = self._genius.search_song(lyrics_re.group(\"name\"), lyrics_re.group(\"artist\"))\r\n # If song is found, print out lyrics\r\n if song is not None:\r\n print(song.lyrics)\r\n lyrics_list = song.lyrics.split(\"\\n\\n\")\r\n for line in lyrics_list:\r\n self.post_message(line)\r\n time.sleep(.500)\r\n\r\n # Else print out error message\r\n else:\r\n error_message = \"ERROR: No Song Found for {name},{artist}\\nFormat is '!lyrics [name], [artist]'\".format(artist=lyrics_re.group(\"artist\"), name=lyrics_re.group(\"name\"))\r\n print(error_message)\r\n self.post_message(error_message)", "async def lyrics(self, ctx: commands.Context, *, song_name: str):\n try:\n client = await self.obtain_client()\n except AttributeError:\n await ctx.send(\"Not key for KSoft.Si has been set, ask owner to add a key.\")\n return\n try:\n music_lyrics = await client.music.lyrics(song_name)\n except ksoftapi.NoResults:\n await ctx.send(\"No lyrics were found for your music.\")\n return\n message, available_musics = await self._title_choose(music_lyrics)\n await ctx.maybe_send_embed(message)\n predicator = MessagePredicate.less(10, ctx)\n try:\n user_message = await self.bot.wait_for(\"message\", check=predicator, timeout=60)\n except Te:\n await ctx.send(\"It's so silent on the outside...\")\n return\n\n choosen_music = user_message.content\n if choosen_music not in available_musics:\n await ctx.send(\n \"I was unable to find the corresponding music in the available music list.\"\n )\n return\n music = available_musics[choosen_music]\n embeds = []\n embed = discord.Embed(color=await ctx.embed_color(), title=music.name, description=None)\n embed.set_thumbnail(url=music.album_art)\n embed.set_footer(text=\"Powered by KSoft.Si.\", 
icon_url=ctx.author.avatar_url)\n for text in pagify(music.lyrics):\n embed.description = text\n embeds.append(embed)\n create_task(menu(ctx, embeds, DEFAULT_CONTROLS)) # No await since max_concurrency is here", "def append(self, track):\r\n title, artists = self._process_dict(track)\r\n if not self._has_song(title) and title and artists:\r\n self._playlist.append('{}:{}'.format(title, artists))", "def get_lyrics(self, artist, song):\n\n # Disable lyrics display\n self.status_bar.hide()\n self.lyrics_view.hide()\n self.scroll.hide()\n\n lyrics = None\n in_database = False\n\n if self.database.status: # Testing connection to database\n lyrics = self.database.retrieve_lyrics(artist, song)\n if lyrics: # False if not found in database\n in_database = True\n\n if not lyrics: # Try next to retrieve from web\n url = self.make_url(artist, song)\n try:\n lyrics = self.fetch_lyrics(url)\n except:\n self.display_message('Internet Connection Problem') # Could not connect to internet\n return\n\n if not lyrics: # Not available in database or on web\n self.display_message('Lyrics Not Available')\n else:\n # Set the display\n lyrics_buffer = self.lyrics_view.get_buffer()\n lyrics_buffer.set_text(lyrics)\n\n if not in_database: # Save if not in database\n self.database.save(artist, song, lyrics)\n\n # Re-enable lyrics display\n self.scroll.show()\n self.lyrics_view.show()\n self.display_message('Lyrics Extracted Successfully')", "def validate_song_is_added_to_playlist(self):\n if self.track == 'Enjoy Enjaami':\n option = element['EnjoyEnjaami']\n elif self.track == 'Inna Mylu':\n option = element['InnaMylu']\n return com_util.find_text(self.driver, option)", "def _check_duplicate(self, artist):\n if self._duplicate(artist):\n raise ValueError(\"Already have an artist for this type \"\n \"and data\")", "def test_single_track_artist_too_long(self):\n self.add_mp3(set_artist=True, artist='z'*(App.max_artist_album_length+10))\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('is longer than', status)\n self.assertEqual(self.get_album_count(), 0)", "def clean(self):\n cleaned_data = super(AlbumForm, self).clean()\n title = cleaned_data['title']\n artist = cleaned_data['artist']\n\n duplicate = Album.objects.filter(title=title).filter(artist=artist)\n if duplicate.exists():\n raise forms.ValidationError('Album with same title and artist already in database')\n return cleaned_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is called to compare a lyrics note to the original to ensure they are not the same; if they are, such a lyrics note is rejected
def lyrics_note_is_same_as_original(): pass
[ "def test_two_mismatches(self):\n\n sam_fields = [\"test_read\", \"0\", \"chr1\", \"202892094\", \"255\", \"5M\", \"*\",\n \"0\", \"0\", \"ACCGA\", \"*\", \"NM:i:2\", \"MD:Z:1A0A2\", \"jI:B:i,-1\",\n \"jM:B:c,-1\" ]\n\n genome = Fasta(\"input_files/hg38_chr1.fa\")\n spliceAnnot = None\n variants = {}\n logInfo = TC.init_log_info(sam_fields)\n\n # Init transcript object\n transcript = t2.Transcript(sam_fields, genome, spliceAnnot)\n\n # Run correction\n error_entries = TC.correctMismatches(transcript, genome, variants, logInfo)\n\n # Check to see if correction was successful\n assert transcript.SEQ == \"AAAGA\"\n assert transcript.CIGAR == \"5M\"\n\n # Check the number and content of the transcript error entries\n print(error_entries)\n assert error_entries.count('\\n') == 2\n assert error_entries.count('Corrected') == 2", "def _check_duplicate_notes(self, tokens, curr_note, step) -> bool:\n same_note_cnt = 0\n idx = step - 3\n while idx > 0:\n prev_note = self._get_num(self.tgt_dict.string(tokens[0, idx : idx + 1]))\n if prev_note != curr_note:\n break\n same_note_cnt += 1\n idx -= 4\n\n if same_note_cnt > _config.PitchPara.Max_Same_Pitch.value:\n return True\n return False", "def check_note_for_history(self):\r\n testrun_notes = [\r\n \"multiple loci suspected\",\r\n \"suspected multicopy, poor performance\",\r\n \"fixed allele 1\",\r\n \"very poor amplification\",\r\n \"very poor amplification, high off target percent\",\r\n \"poor amplification, maybe redesign\",\r\n \"mono-allele 1?\",\r\n \"redesign primer\",\r\n \"most of target\",\r\n \"poor performance\",\r\n \"poor performance, primers off target\",\r\n \"off target amp\",\r\n \"mono-allele 1\",\r\n \"mono-allele 2 and off target\",\r\n \"Nate said it is a mess\",\r\n \"off target amp\",\r\n \"mono-allele 1 and off target\"\r\n ]\r\n if self.note == \"No primers made by primer3\":\r\n self.add_history(\"2018-2-12\",\"Nate\",\"primers were not made for this sequence variation\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Removed by nate, close to other SNP\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Primers designed for this SNP were taken out, were to close to other SNP\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Predicted to form hetrodymer\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Predicted to form hetrodymer\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"no valid primer pair could be made for this position\":\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note in testrun_notes:\r\n self.add_history(\"2018-2-23\",\"Thomas\",self.note)\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n #check if any were missed.\r\n if self.active and self.note != \"sequence variant selected by GBS-SNP-selection\":\r\n pass #print(self.note)\r", "def isEqualNote(self, notestring):\n if isinstance(notestring, Note):\n noteEncoding = notestring.getEncoding()\n else:\n note = self.new(notestring)\n simpleNote = note.simplify()\n noteEncoding = simpleNote.getEncoding()\n if noteEncoding == None:\n return False\n else:\n if self.getEncoding() == noteEncoding:\n return True\n else:\n return False", "def changed_line_is_compatible(before, after):\n if not before:\n return True\n if not after:\n return True\n # It's faster to compare the whole line as a single string, and\n # most of the time this will get us the right answer\n if before.text == 
after.text:\n return True\n # but if the text mismatches, we still have to compare the decoded\n # fields one by one\n if before.row == after.row:\n return True\n return False", "def areDifferentWords(self, motX, motY):\n return not motX == motY", "def quote_differs(a, b):\n for i in range(4):\n if a[i] != b[i]:\n return True\n return False", "def grosslydifferent(self, other, length=10, tolerance=7):\n diff = 0\n for i in range(length):\n if self._sequence[i] != other.sequence[i]:\n diff = diff + 1\n if diff > tolerance:\n return True\n return False", "def different_utr3(transcript1, transcript2):\n\n if len(transcript1.utr3_exons) != len(transcript2.utr3_exons):\n return True\n for i in range(len(transcript1.utr3_exons)):\n exon1 = transcript1.utr3_exons[i]\n exon2 = transcript2.utr3_exons[i]\n if exon1[0] != exon2[0] or exon1[1] != exon2[1]:\n return True\n return False", "def note_match(note_a: NOTE, note_b: NOTE) -> bool:\n return (\n note_a.pitch == note_b.pitch and\n abs(note_a.on - note_b.on) <= ONSET_DELTA\n )", "def get_mismatches(rec):\n qseq = rec.get_forward_sequence().upper()\n if rec.is_reverse:\n qseq = reverseComplement(qseq)\n rseq = rec.get_reference_sequence().upper()\n for qpos, rpos in rec.get_aligned_pairs():\n if qpos == None or rpos == None:\n continue # no indels yet\n q = qseq[qpos]\n r = rseq[rpos - rec.reference_start]\n if q != r:\n position = (rec.reference_name, rpos)\n change = (r, q)\n yield (position, change)", "def test_disambiguate(self):\n self.assertEqual(self.RNA(\"\").disambiguate(), \"\")\n self.assertEqual(\n self.RNA(\"AGCUGAUGUA--CAGU\").disambiguate(), \"AGCUGAUGUA--CAGU\"\n )\n self.assertEqual(\n self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\").disambiguate(\"strip\"), \"AU--CG\"\n )\n s = self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\")\n t = s.disambiguate(\"random\")\n u = s.disambiguate(\"random\")\n for i, j in zip(str(s), str(t)):\n if i in s.moltype.degenerates:\n assert j in s.moltype.degenerates[i]\n else:\n assert i == j\n self.assertNotEqual(t, u)\n self.assertEqual(len(s), len(t))", "def test_titles_do_not_match(self):\r\n gm_title = 'Zhao Hua'\r\n sp_title = 'MMXXX (ft Moor Mother)'\r\n self.assertFalse(gmspotify.titles_match(gm_title, sp_title))", "def test_is_not_similar(self):\n crash_state_1 = (\n 'start <= end ('\n 'SAMP\"0AA0AA00AAA0A00A0A0\"@offsetInAnchor[19]) in TextIterator.cpp\\n'\n 'blink::TextIteratorAlgorithm<blink::EditingAlgorithm<blink::'\n 'NodeTraversal> >::Te\\n'\n 'blink::PlainText\\n')\n crash_state_2 = ('false in gles2_cmd_utils.cc\\n'\n 'base::debug::DebugBreak\\n'\n 'gpu::gles2::GLES2Util::GLFaceTargetToTextureTarget\\n')\n self.is_similar_helper(crash_state_1, crash_state_2, False)", "def _is_unequal_seq_qual(read: Read) -> bool:\n if len(read.seq) != len(read.qual):\n return True\n return False", "def verify_harmonized_sequence(self):\n if str(self.original_translated_sequence) == str(self.harmonized_translated_sequence):\n return True\n else:\n return False", "def is_lyrics_approved():", "def testEqualityFalse1(self):\n dp1 = drug_problem_kb.ProblemRelation(*self.atenolol[0])\n dp2 = drug_problem_kb.ProblemRelation(*self.atenolol[1])\n self.assertNotEqual(dp1, dp2)", "def texts_are_equivalent(texta, textb):\n\n def normalized_lines(text):\n for l in text.splitlines():\n l = l.strip()\n if l:\n yield l\n\n texta = \"\\n\".join(normalized_lines(texta))\n textb = \"\\n\".join(normalized_lines(textb))\n return texta == textb" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the lyrics has been approved or not
def is_lyrics_approved():
[ "def approve_lyrics():\n pass", "def is_approved(self):\n if askbot_settings.ENABLE_CONTENT_MODERATION:\n if self.approved == False:\n return False\n return True", "def is_approved_speaker(self):\n try:\n return self.speaker.is_approved()\n except ObjectDoesNotExist:\n return False", "def song_has_lyrics():\n pass", "def trip_review_ready(self):\n if self.requests.filter(status__in=[12, 16, 17, ]).exists():\n can_proceed = False\n reason = _(\"some requests are still in the review / recommendation phase.\")\n elif self.current_reviewer and self.current_reviewer.role == 5: # There are different criteria for ADM\n adm_ready_travellers = Traveller.objects.filter(request__trip=self, request__status=14)\n if adm_ready_travellers.exists():\n can_proceed = True\n reason = _(\"By approving this trip, the following travellers will be automatically approved: \") + \\\n f'<ul class=\"mt-3\">{listrify([f\"<li>{t.smart_name}</li>\" for t in adm_ready_travellers.all()], \"\")}</ul>'\n else:\n can_proceed = True\n reason = _(\"All actionable requests have been actioned.\")\n # this is a special case of the below scenario, where no trips are ready for ADM but should still proceed\n elif self.requests.count() == self.requests.filter(Q(status=11) | Q(status=8) | Q(status=10) | Q(status=22)).count():\n can_proceed = True\n reason = _(\"All actionable requests have already been approved.\")\n elif not self.requests.filter(status=14).exists():\n can_proceed = False\n reason = _(\"there are no requests ready for ADM approval.\")\n else:\n can_proceed = True\n reason = _(\"all active requests are ready for ADM review.\")\n return dict(can_proceed=can_proceed, reason=reason)", "def is_approved(self) -> bool:\n return self.state == Order.OrderState.APPROVED.choice_value", "def is_approved(self):\n return self.moderator_state in (Page.MODERATOR_APPROVED, Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS)", "def honey_pot_check(self):", "def Confirm(self):\n self.PrintMetadata()\n answer = input(\"Continue [Y/n]? 
\").lower()\n return not answer.startswith(\"n\")", "def _is_approved_usecases(self) -> bool:\n non_approved_usecases = set()\n try:\n approved_usecases = tools.get_approved_usecases()\n pack_meta_file_content = json.loads(self._read_file_content(self.pack_meta_file))\n non_approved_usecases = set(pack_meta_file_content[PACK_METADATA_USE_CASES]) - set(approved_usecases)\n if non_approved_usecases:\n if self._add_error(\n Errors.pack_metadata_non_approved_usecases(non_approved_usecases), self.pack_meta_file):\n return False\n except (ValueError, TypeError):\n if self._add_error(Errors.pack_metadata_non_approved_usecases(non_approved_usecases), self.pack_meta_file):\n return False\n return True", "def courtesy(self):\n if self.reactions[\"COURTESY\"] != \"True\":\n self.bonus_message.append(self.bot_answers[\"COURTESY\"])", "def can_author_assessments(self):\n return # boolean", "def meet_requirement(self, loan):\n return True", "def ConfirmAllowedCopyrightHolder(holder):\n return holder in ALLOWED_COPYRIGHT_HOLDERS", "def APPROVED(self):\n return \"approved\"", "def check_approve_status(answer_id):\n with DatabaseConnection() as cursor:\n cursor.execute(\"SELECT approve FROM answers WHERE answer_id = '%s'\", [answer_id])\n approve = cursor.fetchone()\n if approve ==\"Yeah\":\n return \"Best answer\"", "def test_enough_votes_accept_thesis(self):\n self.thesis.set_students([])\n self.thesis.save()\n self.vote_to_accept_thesis_required_times()\n modified_thesis = self.get_modified_thesis()\n self.assertEqual(modified_thesis[\"status\"], ThesisStatus.ACCEPTED.value)", "def check(m):\n return m.channel == ctx.message.channel and m.author == ctx.author and m.content in [\"s\", \"n\", \"S\", \"N\", \"sim\", \"nao\", \"não\", \"Sim\", \"Nao\", \"Não\"]", "def set_approved(self):\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Calculate the cold plasma dispersion surfaces according to equation 2.64 in Plasma Waves by Swanson (2nd ed.)
def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e): # Make vectors of the wave numbers kc_z = np.linspace(1e-6, kc_z_max, 35) kc_x = np.linspace(1e-6, kc_x_max, 35) # Turn those vectors into matrices kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z) # Find some of the numbers that appear later in the calculations kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B wc_i = 1 / m_i # The ion gyro frequency wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # For every k_perp and k_par, turn the dispersion relation into a # polynomial equation and solve it. # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # The polynomial coefficients are calculated pol_koeff_8 = -2 * kc_ ** 2 pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape) pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2) pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2 pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2) pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2 pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * ( 1 + np.cos(theta_) ** 2) pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2 pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos( theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i)) pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * ( 1 + np.cos(theta_) ** 2) pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2 w_final = np.zeros((10, len(kc_z), len(kc_x))) # For each k, solve the equation for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))): disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0, pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x], 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]] # theoretically should be real (A. Tjulin) w_temp = np.real(np.roots(disp_polynomial)) # We need to sort the answers to get nice surfaces. w_final[:, k_z, k_x] = np.sort(w_temp) n2_ = kc_ ** 2 / w_final ** 2 v_ph_c = np.sqrt(1. / n2_) va_c = 1 / (wp_e * np.sqrt(m_i)) v_ph_va = v_ph_c / va_c diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i) e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor) e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_ b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat, w_final, e_x, e_y, e_z) dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]] dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)] dw_x[:, :, 1:] = np.diff(w_final, axis=2) dw_z[:, 1:, :] = np.diff(w_final, axis=1) v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])] s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z) # Compute ion and electron velocities v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final, e_x, e_y, e_z) # Ratio of parallel and perpendicular to B speed vepar_perp = v_ez * np.conj(v_ez) vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey)) vipar_perp = v_iz * np.conj(v_iz) vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy)) # Total particle speeds v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez) v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz) # Ion and electron energies m_e = -1 en_e = 0.5 * m_e * v_e2 en_i = 0.5 * m_i * v_i2 # Ratio of particle and field energy densities ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot) # Continuity equation dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final, v_ex, v_ez, v_ix, v_iz) dn_e_n_db_b = dn_e_n / b_tot dn_i_n_db_b = dn_i_n / b_tot dn_e_n_dbpar_b = dn_e_n / b_par dn_i_n_dbpar_b = dn_i_n / b_par dn_e = dn_e_n * wp_e ** 2 k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e)) # Build output dict extra_param = {"Degree of electromagnetism": np.log10(b_tot / e_tot), "Degree of longitudinality": np.abs(e_par) / e_tot, "Degree of parallelity E": e_z / e_tot, "Degree of parallelity B": np.sqrt( b_z * np.conj(b_z)) / b_tot, "Ellipticity E": e_pol, "Ellipticity B": b_pol, "E_part/E_field": np.log10(ratio_part_field), "v_g": np.sqrt(v_x ** 2 + v_z ** 2), "v_ph/v_a": np.log10(v_ph_va), "E_e/E_i": np.log10(en_e / en_i), "v_e/v_i": np.log10(np.sqrt(v_e2 / v_i2)), "v_epara/v_eperp": np.log10(vepar_perp), "v_ipara/v_iperp": np.log10(vipar_perp), "dn_e/dn_i": np.log10(dne_dni), "(dn_e/n)/ (dB/B)": np.log10(dn_e_n_db_b), "(dn_i/n)/(dB/B)": np.log10(dn_i_n_db_b), "(dn_i/n)/(dBpar/B)": np.log10(dn_i_n_dbpar_b), "(dn_e/n)/(dB/B)": np.log10(dn_e / k_dot_e), "(dn_e/n)/(dBpar /B)": np.log10(dn_e_n_dbpar_b), " Spar/Stot": s_par / s_tot} for k, v in zip(extra_param.keys(), extra_param.values()): extra_param[k] = np.transpose(np.real(v), [0, 2, 1]) kx_ = np.transpose(kc_x_mat) kz_ = np.transpose(kc_z_mat) wf_ = np.transpose(w_final, [0, 2, 1]) return kx_, kz_, wf_, extra_param
[ "def compute_mixing_coefficients_surf(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n # SET UP NEW MIXING COEFFICIENT ARRAYS\n self.Kv_surf = np.zeros([Ly,N+1])\n self.Kt_surf = np.zeros([Ly,N+1])\n \n self.ghat = np.zeros([Ly,N+1])\n \n\n #################################\n # \tSURFACE KPP\n ################################\n #---> j-loop\n \n self.wm2 = []\n self.ws2 = []\n self.sigma_y = []\n for j in range(Ly):\n #--> k-loop (top to kbl[j])\n # in fortran k=N-1,kbl(j),-1\n for k in range(N-1,self.kbl[j]-1,-1):\n k_w = k\n k_r = k-1\n\n Bfsfc = self.Bfsfc_bl[j]\n zscale = z_u_w[j,N] - z_u_w[j,k_w]\n \n # CALCULATE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm2.append(wm)\n self.ws2.append(ws)\n # COMPUTE VERTICAL MIXING COEFFICIENTS\n sigma = (z_u_w[j,N] - z_u_w[j,k_w]) / np.max([self.hbls[j],self.eps])\n self.sigma1 = sigma #for debugging\n if j == 25: \n self.sigma_y.append(sigma)\n a1 = sigma - 2.\n a2 = 3.-2.*sigma\n a3 = sigma - 1.\n\n if sigma < 0.07:\n cff = 0.5 * (sigma-0.07)**2/0.07\n else:\n cff = 0\n \n \n if k == N-1: \n self.wm_debug = wm\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n\n self.Kv_surf[j,k_w] = wm * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gm1[j]+a3*self.dGm1_dS[j])))\n\n if k == N-1:\n self.ws_debug = ws\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n \n self.Kt_surf[j,k_w] = ws * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gt1[j]+a3*self.dGt1_dS[j])))\n #---> end k-loop \n if self.LMD_NONLOCAL:\n if Bfsfc < 0:\n self.ghat[j,k_w] = 0\n self.ghat[j,k_w] = self.Cg * sigma * (1.-sigma)**2\n else:\n self.ghat[j,k_w] = 0.\n\n # ADD CONVECTIVE ADJUSTMENT IN SURFACE MIXED LAYER \n if self.LMD_CONVEC and self.MLCONVEC: \n for k in range(N-1,int(self.kbl[j]-1),-1):\n k_w = k\n k_r = k -1\n\n if self.bvf[j,k_w] < 0:\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.ffac*self.nu0c\n\n # ADD CONVECTIVE ADJUSTMENT BELOW SURFACE MIXED LAYER\n # IF BKPP IS SWITCHED OFF!!\n for k in range(int(self.kbl[j]-1),-1,-1):\n k_w = k\n k_r = k -1\n if self.LMD_NONLOCAL:\n self.ghat[j,k_w] = 0\n if self.LMD_CONVEC and self.LMD_BKPP == False:\n if self.bvf[j,k_w] < 0:\n self.Kv_surf[j,k_w] = self.Kv_surf[j,k_w] + self.nu0c\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.nu0c\n \n\n #---> end j-loop", "def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def clumpy(self):\n # IMPORT STUFF\n from numpy.convolve import boxcar\n from flags import addflag\n # END IMPORT\n \n # INPUTS\n sky = self['BACKGROUND']\n stamp = self['STAMP'] \n mask = self['MASK']\n # mask_sky = self['MASK_SKY']\n r_petro = self['R_PETRO']\n box2petro = self.execpars['box2petro'][0]\n # END INPUTS\n \n stamp -= sky\n \n def maskit(stamp,mask):\n masked = num.zeros(shape=stamp.shape,type=stamp.type())\n active = num.where(mask == 0)\n masked[active] = stamp[active]\n return masked\n\n masked = 
maskit(stamp,mask)\n # masked_sky = maskit(stamp,mask_sky)\n boxwidth = int(num.around(box2petro * r_petro))\n if boxwidth % 2 == 0 : boxwidth += 1\n if boxwidth < 3 : boxwidth = 3\n\n smoothed = boxcar(masked,(boxwidth,boxwidth),mode='reflect') \n # skysmoothed = boxcar(masked_sky,(boxwidth,boxwidth),mode='reflect') \n \n def g(image,simage,num):\n up = num.abs(image - simage)\n down = num.abs(image)\n up = num.sum(num.sum(up))\n down = num.sum(num.sum(down))\n return up/down\n \n # Bs = g(masked_sky,skysmoothed)\n Bs = 0\n S = g(masked,smoothed,num) - Bs\n \n S_flags = 0L\n \n self['M_S'] = S\n self['flags'] = addflag(self['flags'],S_flags)\n \n # return S, S_flags", "def sky_brightness_change(phase, sep, Z, Zm, k, vzen):\n sep_r = np.deg2rad(sep*u.deg)\n Z = np.deg2rad(Z*u.deg)\n Zm = np.deg2rad(Zm*u.deg)\n \n # Zenith brightness in nL\n Bzen = 34.08*np.exp(20.7233-0.92104*vzen) #eq1\n # Illuminance of moon outside of atmosphere\n illum = 10**(-0.4*(3.84+0.026*abs(phase)+4e-9*phase**4)) #eq20\n # Rayleigh scattering\n scat_r = 10**5.36*(1.06+np.cos(sep_r)**2) #eq17\n # Mie scattering\n if sep >= 10:\n scat_m = 10**(6.15-sep/40.) #eq18\n else:\n #scat_m = 6.2e7*sep**(-2) #eq19\n return(-20)\n # Total scattering\n scat = scat_r + scat_m #eq16\n \n # Model surface brightness of Moon\n Bmoon = scat*illum*10**(-0.4*k*airmass_calc(Zm))*(1-10**(-0.4*k*airmass_calc(Z))) #eq15\n # Dark nighttime sky brightness as a function of zenith distance\n B0 = Bzen*10**(-0.4*k*(airmass_calc(Z)-1))*airmass_calc(Z) #eq2 \n # Change in V band sky brightness caused by moonlight\n delv = -2.5*np.log10((Bmoon+B0)/B0) #eq22\n return delv", "def snow_depth(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [28.01, 30.28, 33.89, 36.80, 36.93, 36.59,\n 11.02, 4.64, 15.81, 22.66, 25.57, 26.67] )\n a = np.array( [ 0.1270, 0.1056, 0.5486, 0.4046, 0.0214, 0.7021,\n 0.3008, 0.3100, 0.2119, 0.3594, 0.1496, -0.1876] )\n b = np.array( [-1.1833, -0.5908, -0.1996, -0.4005, -1.1795, -1.4819,\n -1.2591, -0.6350, -1.0292, -1.3483, -1.4643, -1.4229] )\n c = np.array( [-0.1164, -0.0263, 0.0280, 0.0256, -0.1076, -0.1195,\n -0.0811, -0.0655, -0.0868, -0.1063, -0.1409, -0.1413] )\n d = np.array( [-0.0051, -0.0049, 0.0216, 0.0024, -0.0244, -0.0009,\n -0.0043, 0.0059, -0.0177, 0.0051, -0.0079, -0.0316] )\n e = np.array( [ 0.0243, 0.0044, -0.0176, -0.0641, -0.0142, -0.0603,\n -0.0959, -0.0005, -0.0723, -0.0577, -0.0258, -0.0029] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. - lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n return h", "def compactness(self):\n surf_n, surf_nb, surf_v, surf_vb = self.surface()\n vol_n, vol_nb, vol_v, vol_vb = self.volume()\n return np.power(surf_n, 1.5) / surf_n, np.power(surf_nb, 1.5) / \\\n vol_nb, np.power(surf_v, 1.5) / vol_v, \\\n np.power(surf_vb, 1.5) / vol_vb", "def pwlFly(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n Bvec_complete = []\n Sol_complete = []\n meas_complete = []\n model_complete = []\n postchis = []\n prechis = []\n aics = []\n bics = []\n #w = 1;\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. 
< 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n #print(\"NUMD:\",numd)\n if numd < 2:\n continue\n #\n # Neq is acting like a constrain on the model a small value 0.001\n # let the model vary by 1000 mm\n # will let it vary more. a large value -> 1 will force the model to be closer to 0\n # This gets too large for lots of observations, s best to doit on the fly..\n #\n Neq = np.eye(numZD,dtype=float)# * 0.001\n Apart = np.zeros((numd,numZD))\n\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = (1.-(azData[i,2]-iz*zenSpacing)/zenSpacing)\n Apart[i,iz+1] = (azData[i,2]-iz*zenSpacing)/zenSpacing\n w = np.sin(data[i,2]/180.*np.pi)\n for k in range(iz,iz+2):\n for l in range(iz,iz+2):\n Neq[k,l] = Neq[k,l] + (Apart[i,l]*Apart[i,k]) * 1./w**2\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n #Qxx = np.dot(Apart.T,Apart)\n #Qvv = np.subtract( np.eye(numd) , np.dot(np.dot(Apart,Qxx),Apart.T))\n #sd = np.squeeze(np.diag(Qvv))\n #dx = np.dot(np.linalg.pinv(Qxx),Bvec)\n #dl = np.dot(Apart,dx)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n postchis.append(np.sqrt(postchi/numd))\n prechis.append(np.sqrt(prechi/numd))\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n # calculate the model values for each obs\n model = np.dot(Apart,Sol) #np.zeros(numd)\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n # zen = azData[d,2]\n # iz = int(np.floor(azData[d,2]/zenSpacing))\n # #model[d] = Sol[iz]\n\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),gls_results.rsquared,gls_results.aic,gls_results.bic)\n \n # loglikelihood(meas,model,sd)\n #sd = np.squeeze(np.diag(Qvv))\n #print(\"meas, model, sd:\",np.shape(azData),np.shape(model),np.shape(sd))\n f = loglikelihood(azData[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n aics.append(aic) \n bics.append(bic) \n #print(\"=========================\")\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #A_complete = np.squeeze(np.asarray(A_complete.todense()))\n #print(\"A shape\",np.shape(A_complete))\n\n print(\"Doing a fit to the data\")\n f = loglikelihood(np.array(meas_complete),np.array(model_complete))\n numd = np.size(meas_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n\n return pwl_All, pwlSig_All", "def finiteKPerpdispRel(w):\r\n #Various plasma frequencies\r\n Pi_he=np.sqrt((ne*heRatio)*q_p**2/(eps_0*4*m_amu)) #Helium plasma frequency\r\n Pi_ne=np.sqrt((ne*neRatio)*q_p**2/(eps_0*20*m_amu)) #Neon plasma frequency\r\n Pi_e=np.sqrt(ne*q_e**2/(eps_0*m_e)) #Electron plasma frequency\r\n Omega_he=q_p*magB/(4*m_amu) #Helium cyclotron 
frequency\r\n Omega_ne=q_p*magB/(20*m_amu) #Neon cyclotron frequency\r\n Omega_e=q_e*magB/(m_e) #Electron cyclotron frequency\r\n \r\n #R,L and P\r\n R=1-((Pi_e**2/w**2)*(w/(w+Omega_e)))-((Pi_he**2/w**2)*(w/(w+Omega_he)))-((Pi_ne**2/w**2)*(w/(w+Omega_ne))) #Right-hand polarized wave\r\n L=1-((Pi_e**2/w**2)*(w/(w-Omega_e)))-((Pi_he**2/w**2)*(w/(w-Omega_he)))-((Pi_ne**2/w**2)*(w/(w-Omega_ne))) #Left-hand polarized wave\r\n P=1-(Pi_e**2/(w*(w+1j*nu_e)))-(Pi_he**2/w**2)-(Pi_ne**2/w**2) #Unmagnetized plasma\r\n\r\n #S and D\r\n S=(R+L)/2\r\n D=(R-L)/2\r\n \r\n #u=w**2/c**2\r\n u=(w/c)**2\r\n \r\n #g_perp=k_perp**2\r\n gPerp=kPerp**2\r\n \r\n #Cubic equation coefficients\r\n bTerm=(gPerp*S/P)+(2*gPerp)-(L*u)-(R*u)\r\n cTerm=(2*gPerp*gPerp*S/P)-(gPerp*R*L*u/P)-(gPerp*S*u)+(gPerp*gPerp)-(gPerp*L*u)-(gPerp*R*u)+(R*L*u*u)\r\n dTerm=(gPerp*gPerp*gPerp*S/P)-(gPerp*gPerp*R*L*u/P)-(gPerp*gPerp*S*u)+(gPerp*R*L*u*u)\r\n \r\n #Depressed cubic equation coefficients\r\n pTerm=(3*cTerm-bTerm*bTerm)/3\r\n qTerm=(2*bTerm*bTerm*bTerm-9*bTerm*cTerm+27*dTerm)/27\r\n \r\n #kPar\r\n kPar=0\r\n if 4*pTerm*pTerm*pTerm+27*qTerm*qTerm>0:\r\n #Single real root\r\n term1=(-qTerm/2+np.sqrt((qTerm*qTerm/4)+(pTerm*pTerm*pTerm/27)))**(1/3)\r\n term2=(-qTerm/2-np.sqrt((qTerm*qTerm/4)+(pTerm*pTerm*pTerm/27)))**(1/3)\r\n realRoot=term1+term2\r\n \r\n #Convert back to original cubic\r\n gPar=realRoot-bTerm/3\r\n \r\n #Calcualte kPar\r\n kPar=np.sqrt(gPar)\r\n \r\n else:\r\n #arccos term\r\n arccosTerm=np.arccos((3*qTerm/(2*pTerm))*np.sqrt(-3/pTerm))\r\n #cos term\r\n k=0\r\n cosTerm=np.cos((1/3)*arccosTerm-2*np.pi*k/3)\r\n \r\n #Real root\r\n realRoot=2*np.sqrt(-pTerm/3)*cosTerm\r\n \r\n #Convert back to original cubic\r\n gPar=realRoot-bTerm/3\r\n \r\n #Calcualte kPar\r\n kPar=np.sqrt(gPar)\r\n \r\n return kPar", "def surface_composite_newton_cotes(left_bd, right_bd, n=20, m=2):\n # actual number of discrete points in every dimension\n N = m + (n-1)*(m-1)\n z = SX.sym('z', N, N)\n\n # distance between two discretization points\n h = (right_bd - left_bd)/(N-1)\n\n # generate Newton-Cotes coefficients for each partial interval\n print(\"Generating Newton-Cotes coeffs...\")\n [c, B] = scipy.integrate.newton_cotes(m-1)\n c = 1/(m-1) * c\n print(\"...done!\\n\")\n # define the approximated surface function\n # initiate with zero for iterated assembly\n sfc = SX.sym('sfc', 1)\n sfc[0] = 0\n\n # write Newton-Cotes coefficients into symbolic vector\n coeff = SX(c)\n c = coeff\n\n print(\"Assembling surface functional...\")\n s = 0\n\n for k in range(0, n):\n for l in range(0, n):\n for i in range(0, m):\n for j in range(0, m):\n\n ind_i = k*(m-1) + i\n ind_j = l*(m-1) + j\n\n if(ind_i == N - 1):\n dy = (z[ind_i - 1, ind_j]-z[ind_i, ind_j])/h\n else:\n dy = (z[ind_i + 1, ind_j]-z[ind_i, ind_j])/h\n\n if(ind_j == N - 1):\n dx = (z[ind_i, ind_j - 1]-z[ind_i, ind_j])/h\n else:\n dx = (z[ind_i, ind_j + 1]-z[ind_i, ind_j])/h\n\n\n sfc = sfc + c[i]*c[j]*sqrt(1 + dx**2 + dy**2)\n s = s + 1\n\n sfc = (((right_bd - left_bd)/(n))**2) * sfc\n print(\"...done! 
Performed s = \", s, \"assembly operations.\\n\")\n return sfc,z,N", "def solid_surface_density_RC2014_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n mult_obs = sss_per_sys['Mtot_obs']\n mult_obs_2p = []\n a_obs_2p = []\n core_mass_obs_2p = []\n sigma_obs_2p = []\n for i in np.arange(len(mult_obs))[mult_obs > 1]: # only consider multi-planet systems\n a_sys = gen.a_from_P(sss_per_sys['P_obs'][i], sss_per_sys['Mstar_obs'][i])\n core_mass_sys = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(sss_per_sys['radii_obs'][i][a_sys > 0])\n core_mass_sys[core_mass_sys > max_core_mass] = max_core_mass\n a_sys = a_sys[a_sys > 0]\n\n mult_obs_2p += [len(a_sys)]*len(a_sys)\n a_obs_2p += list(a_sys)\n core_mass_obs_2p += list(core_mass_sys)\n sigma_obs_2p += list(solid_surface_density_system_RC2014(core_mass_sys, a_sys))\n mult_obs_2p = np.array(mult_obs_2p)\n a_obs_2p = np.array(a_obs_2p)\n core_mass_obs_2p = np.array(core_mass_obs_2p)\n sigma_obs_2p = np.array(sigma_obs_2p)\n return sigma_obs_2p, core_mass_obs_2p, a_obs_2p, mult_obs_2p", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four 
Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux", "def sky_brightness_change(phase, sep, Z, Zm, k, vzen):\n sep_r = np.deg2rad(sep * u.deg)\n Z = np.deg2rad(Z * u.deg)\n Zm = np.deg2rad(Zm * u.deg)\n\n # Zenith brightness in nL\n Bzen = 34.08 * np.exp(20.7233 - 0.92104 * vzen) # eq1\n # Illuminance of moon outside of atmosphere\n illum = 10 ** (-0.4 * (3.84 + 0.026 * abs(phase) + 4e-9 * phase ** 4)) # eq20\n # Rayleigh scattering\n scat_r = 10 ** 5.36 * (1.06 + np.cos(sep_r) ** 2) # eq17\n # Mie scattering\n if sep >= 10:\n scat_m = 10 ** (6.15 - sep / 40.) # eq18\n else:\n # scat_m = 6.2e7*sep**(-2) #eq19\n return (-20)\n # Total scattering\n scat = scat_r + scat_m # eq16\n\n # Model surface brightness of Moon\n Bmoon = scat * illum * 10 ** (-0.4 * k * airmass_calc(Zm)) * (1 - 10 ** (-0.4 * k * airmass_calc(Z))) # eq15\n # Dark nighttime sky brightness as a function of zenith distance\n B0 = Bzen * 10 ** (-0.4 * k * (airmass_calc(Z) - 1)) * airmass_calc(Z) # eq2\n # Change in V band sky brightness caused by moonlight\n delv = -2.5 * np.log10((Bmoon + B0) / B0) # eq22\n return delv", "def solid_surface_density_nHill_given_observed_catalog(sss_per_sys, max_core_mass=10., n=10.):\n Mstar_obs = np.repeat(sss_per_sys['Mstar_obs'][:,None], np.shape(sss_per_sys['P_obs'])[1], axis=1)[sss_per_sys['P_obs'] > 0] # flattened array of stellar masses repeated for each planet\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_nHill(core_mass_obs, a_obs, Mstar=Mstar_obs, n=n)\n return sigma_obs, core_mass_obs, a_obs", "def oceansim(sun_az,sun_zen,cam_head,cam_elev=0,m2=1.33,npart=1.08,mu=3.483, debug=True):\n\n #Water surface norm\n n = np.array([0,0,1])\n m1 = 1.0\n #vector from sun:\n ki = -np.asarray([np.sin(sun_az)*np.sin(sun_zen),\n np.cos(sun_az)*np.sin(sun_zen),\n np.cos(sun_zen)])\n xi = norm_cross(n,ki)\n #transmitted sunlight\n #tx, ty are the transmission amplitude coefficients in the xt, yt directions\n kt,tx,ty = Fresnel.transmission(ki,n,m1,m2)\n xt = xi\n #vector to camera\n kc = -np.asarray([np.sin(cam_head)*np.cos(cam_elev),\n np.cos(cam_head)*np.cos(cam_elev),\n np.sin(cam_elev)])*np.linalg.norm(kt)\n xc = norm_cross(n, kc) #right\n yc = norm_cross(kc, xc) #up\n #vectors for scattering\n ys = norm_cross(kt, kc) # y-axis of scattering event\n xst = norm_cross(ys, kt) # x-axis of scattering event relative to transmitted sunlight\n xsc = norm_cross(ys, kc) # x-axis of scattering event relative to camera\n #Mueller matrices\n # transmission through water surface:\n mm1 = Mueller.polarizer(tx,ty)\n # rotate to scattering plane\n mm2 = Mrotv(kt,xt,xst)\n # scatter\n th_s = vector_angle(kt,kc)\n #mm3 = Mocean(rad2deg(th_s)) #using Empirical ocean scattering\n 
mm3 = Mueller.rayleigh_norm(th_s) #normalized Rayleigh scattering matrix\n #b = Scattering.bsf_fournier(npart,mu)\n b = Scattering.vspf_fournier(th_s,npart,mu)\n # transform to camera's horizontal and up vectors\n mm4 = Mxform(xsc,ys, xc,yc)\n #Combined: mm4 . (b*mm3) . mm2 . mm1\n m = mm4.dot(b*mm3.dot(mm2.dot(mm1)))\n #stokes vector\n s = m.dot([1,0,0,0])\n if debug:\n return s,m,(ki,xi),(kt,xt,xst),(kc,xc,xsc),(mm1,mm2,mm3,b,mm4)\n else:\n return s,m", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor 
unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n print((grad_p)*keq)\n print(- grad_z*keq*self.gama)\n print(q)\n print(self.store_flux_pf_gr[volume][tuple(unit)])\n print('\\n')\n import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if sum(values) > lim4:\n print('fluxo multiescala nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux", "def comp_surface_magnets(self):\n if self.magnet_0:\n return self.W0 * self.H1\n else:\n return 0", "def calculate_sky_and_reflection_components(self):\n\n # FIXME: not very robust, make sure to have a test for it\n self.surface_registry['isotropic_term'] = (self.vf_matrix[:-1, -1]\n * self.irradiance_terms[-1])\n self.surface_registry['reflection_term'] = (\n self.surface_registry['qinc']\n - self.surface_registry['irradiance_term']\n - self.surface_registry['isotropic_term']\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test Restaurant.__check_conditions decorator Test must be passed if functions with this decorator raised error cause of Hall, Delivery or Kitchen was not setted.
def test_open_no_setup(restaurant_only, hall_only, kitchen_only, delivery_only): # Here checks not all variants, cause restaurant_only is not isolated # object. They were previously check and working alongside # but affects result if together. # no setups with pytest.raises(CustomWarning): restaurant_only.open() assert restaurant_only.is_working is False, "You need to setup Kitchen, Delivery and Hall" # only kitchen with pytest.raises(CustomWarning): restaurant_only.set_kitchen(kitchen_only) restaurant_only.open() assert restaurant_only.is_working is False, "You need to setup Kitchen, Delivery and Hall" # only delivery and kitchen with pytest.raises(CustomWarning): restaurant_only.set_delivery(delivery_only) restaurant_only.set_kitchen(kitchen_only) restaurant_only.open() assert restaurant_only.is_working is False, "You need to setup Kitchen, Delivery and Hall"
[ "def __preflight_check(self) -> None:\n\n if not self.can_proceed:\n if config.settings.dry_run:\n raise AssertionError(f'\"{self.name}\" cannot proceed because of dry run')\n\n raise PreconditionFailedError(f'\"{self.name}\" cannot proceed')", "def test_properties(self):\n assert self.strategy.ledger_id == self.skill.skill_context.default_ledger_id\n assert self.strategy.contract_id == str(ERC1155Contract.contract_id)\n assert self.strategy.mint_quantities == self.mint_quantities\n assert self.strategy.token_ids == self.strategy._token_ids\n\n self.strategy._token_ids = None\n with pytest.raises(ValueError, match=\"Token ids not set.\"):\n assert self.strategy.token_ids\n\n with pytest.raises(ValueError, match=\"Contract address not set!\"):\n assert self.strategy.contract_address\n self.strategy.contract_address = self.contract_address\n with pytest.raises(AEAEnforceError, match=\"Contract address already set!\"):\n self.strategy.contract_address = self.contract_address\n assert self.strategy.contract_address == self.contract_address\n\n assert self.strategy.is_contract_deployed is False\n self.strategy.is_contract_deployed = True\n assert self.strategy.is_contract_deployed is True\n with pytest.raises(AEAEnforceError, match=\"Only allowed to switch to true.\"):\n self.strategy.is_contract_deployed = False\n\n assert self.strategy.is_tokens_created is False\n self.strategy.is_tokens_created = True\n assert self.strategy.is_tokens_created is True\n with pytest.raises(AEAEnforceError, match=\"Only allowed to switch to true.\"):\n self.strategy.is_tokens_created = False\n\n assert self.strategy.is_tokens_minted is False\n self.strategy.is_tokens_minted = True\n assert self.strategy.is_tokens_minted is True\n with pytest.raises(AEAEnforceError, match=\"Only allowed to switch to true.\"):\n self.strategy.is_tokens_minted = False\n\n assert self.strategy.gas == self.strategy._gas", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def check(condition):", "def check_requirements(self): # pylint: disable=no-self-use\n self.is_skipped = False", "def _check(self):\n if self.action_on_failure not in self.ACTION_ON_FAILURE:\n raise type_utils.TestListError(\n 'action_on_failure must be one of \"NEXT\", \"PARENT\", \"STOP\"')\n\n if self.parallel:\n if not self.subtests:\n raise type_utils.TestListError(\n '`parallel` should be set on test group')\n for subtest in self.subtests:\n if not subtest.IsLeaf():\n raise type_utils.TestListError(\n 'Test %s: all subtests in a parallel test should be leaf nodes' %\n self.id)\n if subtest.enable_services or subtest.disable_services:\n raise type_utils.TestListError(\n 'Test %s cannot be parallel with enable_services or '\n 'disable_services specified.' 
% subtest.id)\n\n # all subtests should come before teardown tests\n it = iter(self.subtests)\n if not self.teardown:\n # find first teardown test\n it = itertools.dropwhile(lambda subtest: not subtest.teardown, it)\n for subtest in it:\n if not subtest.teardown:\n raise type_utils.TestListError(\n '%s: all subtests should come before teardown tests' % self.id)\n\n for subtest in self.subtests:\n subtest._check() # pylint: disable=protected-access", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def assert_all_calls(\n src: str, validation_problem: str,\n *conditions: CallableConditionOption\n) -> None:\n if ASSERTION_ENABLED: # pylint: disable=too-many-nested-blocks\n for cond in conditions:\n if callable(cond):\n if not cond():\n raise PetroniaInvalidState(src, validation_problem, None)\n else:\n if not cond[0]():\n reason = 'failed check'\n vargs: Sequence[Any] = EMPTY_LIST\n kvargs: Dict[str, Any] = EMPTY_DICT\n if len(cond) >= 2:\n reason = cond[1]\n if len(cond) >= 4:\n vargs = cond[2] # type: ignore\n kvargs = cond[3] # type: ignore\n elif len(cond) == 3:\n if isinstance(cond[2], dict): # type: ignore\n kvargs = cond[2] # type: ignore\n else:\n vargs = cond[2] # type: ignore\n raise PetroniaInvalidState(\n src,\n validation_problem,\n reason.format(*vargs, **kvargs)\n )", "def test_create_restaurant(self, restaurant):\n assert restaurant is not None", "def test_check_args_weekend(self):\n test_date = dt.datetime(2021, 6, 20, 11, 0, 0)\n with self.assertRaises(ValueError) as context:\n self.duedate.check_args(test_date, self.test_turn_time)\n self.assertTrue(\n \"You can submit requests during weekdays only.\" in str(context.exception))", "def _check_parameters(self, target_function, **kwargs):\n # Ensure all arguments are =< 0 where relevant\n for keyword, value in kwargs.items():\n # Two conditions\n value_is_less_than_zero = value < 0\n keyword_is_relevant = keyword in ['mean', 'constant', 'low', 'mode', 'high']\n # Test conditions\n if keyword_is_relevant and value_is_less_than_zero:\n raise FairException('\"{}\" is less than zero.'.format(keyword))\n # Check that all required keywords are provided\n required_keywords = self._required_keywords[target_function]\n for required_keyword in required_keywords:\n if required_keyword in kwargs.keys():\n pass\n else:\n raise FairException('\"{}\" is missing \"{}\".'.format(str(target_function), required_keyword))", "def validate(self):\n try:\n assert self.__age_calculate() is True, Exception('Age is less than expected')\n assert self.__is_user_repeated() is True,Exception(\n 'Recently request received in last 5 days')\n assert self.__is_indian_or_american() is True, Exception(\n 'Nationality should be india or america')\n assert self.__check_state() is True, Exception('State should be valid')\n assert self.__check_salary() is True, Exception(\n 'Salary should be below 90k and above 10k')\n self.__log.write_log(\"All Validation is Successful\")\n self.__response = {'response':'success'}\n return True\n except AssertionError as error:\n self.__response = {'response':f\"{error}\"}\n self.__log.write_log(\"Validation Error...Check the Eligibility Criteria...\")\n return False", "def precondition(self, *args, **kwargs):\n pass", "def 
check_preconditions(preconditions, game, print_failure_reasons=True):\n all_conditions_met = True\n for check in preconditions: \n if check == \"inventory_contains\":\n item = preconditions[check]\n if not game.is_in_inventory(item):\n all_conditions_met = False\n if print_failure_reasons:\n print(\"You don't have the %s\" % item.name)\n if check == \"in_location\":\n location = preconditions[check]\n if not game.curr_location == location:\n all_conditions_met = False\n if print_failure_reasons:\n print(\"You aren't in the correct location\")\n if check == \"location_has_item\":\n item = preconditions[check]\n if not item.name in game.curr_location.items:\n all_conditions_met = False\n if print_failure_reasons:\n print(\"The %s isn't in this location\" % item.name)\n if check == \"location_has_item_silent\":\n item = preconditions[check]\n if not item.name in game.curr_location.items:\n all_conditions_met = False\n # todo - add other types of preconditions\n return all_conditions_met", "def check_preconditions(preconditions, game, print_failure_reasons=True):\n all_conditions_met = True\n for check in preconditions:\n print(check)\n if check == \"inventory_contains\":\n item = preconditions[check]\n if not game.is_in_inventory(item):\n all_conditions_met = False\n if print_failure_reasons:\n print(\"You don't have the %s\" % item.name)\n if check == \"in_location\":\n location = preconditions[check]\n if not game.curr_location == location:\n all_conditions_met = False\n if print_failure_reasons:\n print(\"You aren't in the correct location\")\n if check == \"location_has_item\":\n item = preconditions[check]\n if not item.name in game.curr_location.items:\n all_conditions_met = False\n if print_failure_reasons:\n print(\"The %s isn't in this location\" % item.name)\n if check == \"block_gone\":\n item = preconditions[check]\n if item.name in game.curr_location.items:\n all_conditions_met = False\n if print_failure_reasons:\n print(\"The %s is still blocking the way\" % item.name)\n # todo - add other types of preconditions\n return all_conditions_met", "def conditions():\n pass", "def test_check_raise_exception(self):\n grader = Notebook(tests_dir=TEST_FILES_PATH + \"tests\")\n global_env = 0\n for q_path in glob(TEST_FILES_PATH + \"tests/*.py\"):\n q = os.path.split(q_path)[1][:-3]\n self.assertRaises(AttributeError,\n lambda: grader.check(q, global_env=global_env))", "def checkFood(self, food):\n pass", "def triple_assert_check(self, exception, method, df, *args, **kwargs):\n self.assertRaises(exception,\n method,\n df,\n *args)\n self.assertRaises(exception,\n plp.validate_for_django,\n df,\n *args)\n self.assertRaises(exception,\n plp.to_django,\n df,\n *args,\n validate=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test of cooking the same product twice. Test passed if second cooking of same product raise ValueError
def test_cook_twice(cook_not_busy, product_for_cook): cook_not_busy.cook_dish(product_for_cook) with pytest.raises(ValueError): cook_not_busy.cook_dish(product_for_cook)
[ "def test_not_like_product_twice(self):\n\n # create initial product\n self.test_like_product()\n\n # Attempt to like the product again\n url = \"/products/1/like\"\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.post(url, None, format='json')\n json_response = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(json_response[\"message\"],\n \"You have already liked that product.\")", "def test_item_creation_twice(self):\n # create an item\n self.test_shoppingitem()\n # create the same item twice\n res2 = self.client().post(\"/shoppinglists/1/items\",\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.shoppingitem)\n self.assertIn(\"Item name already exists\", str(res2.data))", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500", "def test_run_repeatability(self):\n self.assertEqual(TestTrain.hash_100_steps_1, TestTrain.hash_100_steps_2)", "def test_create_duplicate_vendor_product_id_under_same_vendor(self):\n duplicated_vendor_product_id = '1'\n inventory1 = Inventory.objects.create(\n vendor=self.vendor,\n product=self.product,\n vendor_product_id=duplicated_vendor_product_id,\n )\n product2 = Product.objects.create(name='Second Product')\n with self.assertRaises(IntegrityError):\n inventory2 = Inventory.objects.create(\n vendor=self.vendor,\n product=product2,\n vendor_product_id=duplicated_vendor_product_id,\n )", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_perform_trade_with_duplicates(self):\n self.center.hit(None)\n self.center.chose_own_card(self.ranch)\n self.center.chose_other_card(self.cafe)\n # More type() trickery here so we're not relying on internal deck implementation.\n self.assertEqual(\n [type(x) for x in sorted(self.player.deck)],\n [type(x) for x in [self.wheat, self.ranch2, self.cafe, self.center]]\n )\n self.assertEqual(\n [type(x) for x in sorted(self.player2.deck)],\n [type(x) for x in [self.ranch, self.bakery, self.cafe2, self.stadium]]\n )\n self.assertEqual(self.game.state_cards, [])\n self.assertEqual(self.game.state, self.game.STATE_PURCHASE_DECISION)", "def test_repeatable(self):\n print('\\n>>> running: test_repeatable')\n msg = bytearray(get_random_bytes())\n\n first_digest = sha1.sha1(bytes(msg))\n second_digest = sha1.sha1(bytes(msg))\n\n print('... test_repeatable: checking for identical digests')\n self.assertEqual(first_digest, second_digest)\n\n print('... 
test_repeatable: success')", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def test_6(self):\n toothpaste = Store.Product(11, \"toothpaste\", \"dental\", 2, 4)\n milk = Store.Product(12, \"milk\", \"dairy\", 2, 3)\n eggs = Store.Product(14, \"eggs\", \"dairy\", 2, 2)\n apple_juice = Store.Product(13, \"apple juice\", \"drink\", 1, 1)\n\n s = Store.Store()\n s.add_product(toothpaste)\n s.add_product(milk)\n s.add_product(eggs)\n s.add_product(apple_juice)\n\n henry = Store.Customer(\"henry\", \"mrh\", False)\n s.add_member(henry)\n\n s.add_product_to_member_cart(11, \"mrh\")\n s.add_product_to_member_cart(12, \"mrh\")\n s.add_product_to_member_cart(14, \"mrh\")\n self.assertAlmostEqual(s.check_out_member(\"mrh\"), 6.42, \"not the correct checkout amount\")", "def test_duplicate_ratings_not_allowed(self):\n\n # Create initial ratings\n self.test_rate_product()\n\n # Test rating our product again\n url = \"/products/1/rate\"\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.post(url, None, format='json')\n json_response = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(json_response[\"message\"],\n \"You've already rated this product.\")", "def test_save_item_with_duplicate_value_on_unique_field_raises(\n test_store, andy, pandy, candy\n):\n\n person = next(test_store.get_by(name=\"Andy\"))\n person.name = \"Pandy\"\n\n with pytest.raises(NotUniqueException):\n test_store.save(person)\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_add_same(self):\n item1 = Item('item1', 10)\n item2 = Item('item1', 10)\n self.cart.add(item1)\n self.cart.add(item2)\n self.assertEqual(len(self.cart), 1)", "def test_cart_creation_duplicate_name(self):\n cart_name = 'cart name'\n self.cart_item_manager.create_cart('123', cart_name, False)\n self.cart_item_manager.create_cart('124', cart_name, False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.create_cart('123', cart_name, False)", "def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])", "def test_update_cart_name_duplicate(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.create_cart(user_id, 'Cart2', False)\n with 
self.assertRaises(DuplicateItemError):\n self.cart_item_manager.update_cart(user_id, cart_id, {'CartName': 'Cart2'})", "def test_duplicate_entries(self):", "def test_add_already_present(self):\n food_item = self.create_a_food_item()\n # remove an entry from the frozen\n self.shelves['frozen'].food_dict.popitem()\n rc = process_new_item(self.shelves, food_item)\n self.assertEqual(rc, NewItemStatus.ok)\n food_item_dup = self.create_a_food_item()\n food_item_dup.temp = 'hot'\n rc = process_new_item(self.shelves, food_item_dup)\n self.assertEqual(rc, NewItemStatus.already_shelved)", "def test_product_validation(self):\n\n Product.objects.create(\n name=\"product1\",\n category=self.cakes1,\n unit=self.gram1,\n caffe=self.filtry\n )\n\n with self.assertRaises(Exception):\n Product.objects.create(\n name=\"product1\",\n category=self.cakes1,\n unit=self.gram1,\n caffe=self.kafo\n )\n\n with self.assertRaises(Exception):\n Product.objects.create(\n name=\"product1\",\n category=self.cakes,\n unit=self.gram1,\n caffe=self.filtry\n )\n\n with self.assertRaises(Exception):\n Product.objects.create(\n name=\"product1\",\n category=self.cakes1,\n unit=self.gram,\n caffe=self.filtry\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test of cooking by busy cook Test passed if busy cook raise a CustomWarning
def test_busy_cook(cook_busy, product_for_cook): with pytest.raises(CustomWarning): assert cook_busy.cook_dish(product_for_cook)
[ "def test_cook_twice(cook_not_busy, product_for_cook):\n\n cook_not_busy.cook_dish(product_for_cook)\n with pytest.raises(ValueError):\n cook_not_busy.cook_dish(product_for_cook)", "def test_cook_set_free(cook_busy, product_for_cook):\n cook_busy.set_free(True)\n # if product needs to be cooked\n assert product_for_cook.get_need_cook_status() is True\n cook_busy.cook_dish(product_for_cook)\n assert product_for_cook.get_need_cook_status() is False", "def test_eat_unhealthy(self):\n \tself.assertEqual(\n\t\t\teat(\"pizza\", isHealthy=False),\n\t\t\t\"I'm eating pizza, because YOLO!\"\n \t)", "def test_work_on_absent_warning_sickness(self):\n\n vals = {\n 'check_in': datetime.datetime(2018, 10, 15, 9),\n 'check_out': datetime.datetime(2018, 10, 15, 10),\n 'employee_id': self.employee.id,\n }\n self.env['hr.attendance'].create(vals)\n\n self.timesheet.check_warnings()\n\n expected = [\n (datetime.date(2018, 10, 15), 'Arbeitszeit an einem Urlaubs-, Feier- und Krankenstandstag'),\n (datetime.date(2018, 10, 27), 'Arbeitszeit am Wochenende'),\n ]\n self.compare_warnings(expected)", "def test_breeding_failed_carn(self):\n nt.assert_equal(self.carn.breeding(1), None)", "def test_notifyDoneNoisy(self):\n self.noisyAttemptMgr.notifyDone(\"noisyRelayer\")\n self.assertTrue(self.eventLog)", "def verifyBuckling(self):\n pass", "def test_fails_when_no_warning(self):\r\n with assertions.assert_raises(AssertionError):\r\n with assertions.assert_warns():\r\n pass", "def test_cancel_duty(self):\n pass", "def test_cliches_write_good_basic(self):\n assert chk.check_cliches_write_good(\"\"\"No cliches here.\"\"\") == []\n # use one of the example cliches to verify basic functionality\n assert chk.check_cliches_write_good(self.l_write_good) != []\n assert \"cliches.write_good\" in chk.check_cliches_write_good(\n self.l_write_good)[0]", "def test_outright():\n\n _do_test(\"tests/resources/conduit/basta_bar.xpi\",\n conduit.test_conduittoolbar,\n failure=True,\n require_install=True,\n set_type=PACKAGE_EXTENSION)", "def test_bookkeep(self) :\n\t\tpass", "def test_new_bid_miss(self):", "def test_download_warning(self, inst_dict):\n\n test_inst, date = initialize_test_inst_and_date(inst_dict)\n\n with warnings.catch_warnings(record=True) as war:\n test_inst.download(date, date)\n\n assert len(war) >= 1\n categories = [war[j].category for j in range(0, len(war))]\n assert UserWarning in categories\n return", "def test_electric_charge_warning(argument, expected_warning):\n with pytest.warns(expected_warning, message=(\n f\"electric_charge({repr(argument)}) is not issuing a \"\n f\"{expected_warning}.\")):\n electric_charge(argument)", "def test_cancelled_ticket_is_held(self):\n pass", "def test_initNoisy(self):\n self.assertTrue(self.noisyAttemptMgr.noisy)", "def requires_cooling(self, heating_state, cooling_state):\n pass", "def test_overdue(self):\n self.assertTrue(task_details_obtained.is_overdue())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test of changing the state of a cook. A busy cook is set to free and then tries to cook the dish. Cooking should succeed (product.get_need_cook_status should be False)
def test_cook_set_free(cook_busy, product_for_cook): cook_busy.set_free(True) # if product needs to be cooked assert product_for_cook.get_need_cook_status() is True cook_busy.cook_dish(product_for_cook) assert product_for_cook.get_need_cook_status() is False
[ "def test_change_recipe(self):\n c1_data, c2_data, pr = self.create_pr_data()\n c1_data, c2_data, pr = self.create_pr_data()\n self.set_counts()\n pr.save()\n self.compare_counts(events=1, jobs=2, ready=1, prs=1, active=2, active_repos=1)\n\n new_recipe = utils.create_recipe(name=\"New recipe\",\n user=self.build_user,\n repo=self.repo,\n branch=self.branch,\n cause=models.Recipe.CAUSE_PULL_REQUEST)\n pr_recipe = models.Recipe.objects.filter(cause=models.Recipe.CAUSE_PULL_REQUEST).latest()\n new_recipe.filename = pr_recipe.filename\n new_recipe.save()\n for dep in pr_recipe.depends_on.all():\n new_recipe.depends_on.add(dep)\n pr_recipe.current = False\n pr_recipe.save()\n\n self.set_counts()\n pr.save()\n self.compare_counts()", "def test_pending_state(self):\n\n # tanks low level\n user_input = [1, 1]\n water_weight = WATER_TANK_WEIGHT_MAX - random.randint(0, WATER_MIN_WEIGHT - 1) # lowest than minimal value\n beans_weight = BEANS_TANK_WEIGHT_MAX - random.randint(0, BEANS_MIN_WEIGHT - 1)\n self.coffee_machine.water_tank = water_tank = WaterTank()\n self.coffee_machine.bean_tank = beans_tank = CoffeeBeanTank()\n water_tank.decrease_weight(water_weight)\n beans_tank.decrease_weight(beans_weight)\n self.assertEqual(beans_tank.status, 0)\n self.assertEqual(water_tank.status, 0)\n self.coffee_machine.waiting.run()\n self.assertTrue(isinstance(self.coffee_machine.waiting.next(), PendingSupply))\n self.assertEqual(self.coffee_machine.is_pending, True)\n # refill tanks\n with patch('builtins.input', side_effect=user_input):\n self.coffee_machine.pending_supply.run()\n # supply\n self.assertEqual(self.coffee_machine.beans_tank.status, 1)\n self.assertEqual(self.coffee_machine.water_tank.status, 1)\n self.assertFalse(self.coffee_machine.is_pending)\n self.assertTrue(isinstance(self.coffee_machine.pending_supply.next(), Waiting))", "def test_cook_twice(cook_not_busy, product_for_cook):\n\n cook_not_busy.cook_dish(product_for_cook)\n with pytest.raises(ValueError):\n cook_not_busy.cook_dish(product_for_cook)", "def test_update_state1(self):\n pass", "def test_update_state(self):\n pass", "def test_update_state2(self):\n pass", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def test_update_contract_status(self):\n pass", "def test_update_state3(self):\n pass", "def updateState(self):\n\n if ('cutting' in self.step_ops) and (self.cut_state.user_cutting):\n self.step_ops['cutting'] = True\n \n if ('cooking' in self.step_ops) and (self.cut_state.user_cooking):\n self.step_ops['cooking'] = True\n\n # TODO: add the rest of the operations\n\n advance = True\n\n # Check if ALL operations are complete\n for op in self.step_ops:\n if self.step_ops[op] == False:\n advance = False\n break\n\n if advance:\n self.nextStep()", "def test_update_state4(self):\n pass", "def change_availability():\n artwork_sold = get_artwork_name()\n if not controls_utils.artwork_exists(artwork_sold):\n print('No record of that piece of art. ')\n else:\n artist = controls_utils.name_of_artist(artwork_sold)\n if not controls_utils.artwork_available(artwork_sold, artist):\n print('Sorry that piece has already been sold. ')\n else:\n response = input('Mark ' + artwork_sold + ' as sold? Y or N ')\n if response.upper() == 'Y':\n mark_as_sold(artwork_sold)\n while not controls_utils.response_affirmative(response):\n response = input('Are you sure you want to mark '\n + artwork_sold + ' by ' + artist + ' as sold? 
Y or N or press X to escape ')\n if response.upper() == 'X':\n break\n elif response.upper() == 'N':\n break", "def test_charge_correct_for_fiction_after_close(self):\n rental = create_test_rental(\n book=self.book2,\n customer=self.user1,\n date_borrowed=\"2019-05-22 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"9.00\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)", "def test_grinding_coffee_beans_state(self):\n\n coffee_type = self.coffee_type[1]\n self.coffee_machine.beans_tank = beans_tank = CoffeeBeanTank()\n self.coffee_machine.chosen_coffee_name = coffee_type\n self.coffee_machine.chosen_coffee_data = coffee_data = COFFEE_TYPES.get(coffee_type)\n self.coffee_machine.grinding_beans.run()\n self.assertEqual(beans_tank.weight, (BEANS_TANK_WEIGHT_MAX - coffee_data.get('beans_weight')))", "def test_ensure_state_change_if_needed(self, setState, commit):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('NEW_FILES')\n setState.assert_called()", "def test_update_shopping_cart_item_hold(self):\n pass", "def test_pre_burn_actions(self):\n p = self.make('Prescription')\n day_state = p.day_state\n self.assertFalse(day_state.pre_actions)\n day_state.pre_actions = True\n\n # try marking pre-burn actions as complete without any actions\n with self.assertRaises(ValidationError) as cm:\n day_state.full_clean()\n self.assertEqual(cm.exception.messages_dict, {\n 'pre_actions': [\"Pre-burn actions cannot be marked as \"\n \"complete unless there is at least one \"\n \"pre-burn action associated with the burn.\"]})\n\n # try marking pre-burn actions as not applicable without any actions\n day_state.pre_actions = None\n day_state.full_clean()\n\n # try marking pre-burn actions as complete without filling in details\n risk = Risk.objects.filter(prescription=p)[0]\n action = Action.objects.create(risk=risk, pre_burn=True)\n\n day_state = p.day_state\n self.assertFalse(day_state.pre_actions)\n day_state.pre_actions = True\n\n with self.assertRaises(ValidationError) as cm:\n day_state.full_clean()\n self.assertEqual(cm.exception.messages_dict, {\n 'pre_actions': [\"Pre-burn actions cannot be marked as complete\"\n \"unless all pre-burn actions have details.\"]})\n\n action.details = \"Test details\"\n action.save()\n\n self.assertFalse(day_state.pre_actions)\n day_state.pre_actions = True\n day_state.full_clean()\n day_state.save()", "def test_cancelling_event_cancels_open_block_bookings(self):\n for user in baker.make_recipe('booking.user', _quantity=3):\n block = baker.make_recipe(\n 'booking.block', block_type__event_type=self.event.event_type,\n user=user\n )\n baker.make_recipe(\n 'booking.booking', event=self.event, status=\"OPEN\", block=block,\n paid=True\n )\n\n for booking in Booking.objects.filter(event=self.event):\n self.assertIsNotNone(booking.block)\n self.assertEqual(booking.status, 'OPEN')\n\n self._post_response(\n self.staff_user, self.event, {'confirm': 'Yes, cancel this event'}\n )\n for booking in Booking.objects.filter(event=self.event):\n self.assertIsNone(booking.block)\n self.assertEqual(booking.status, 'CANCELLED')", "async def test_product_reconfigure_configure_busy(\n self, client: aiokatcp.Client, mocker\n ) -> 
None:\n await client.request(\"product-configure\", \"product1\", CONFIG)\n async with self._product_configure_slow(client, mocker, \"product2\"):\n await client.request(\"product-reconfigure\", \"product1\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats the output of a transaction receipt to its proper values
def output_transaction_receipt_formatter(receipt): if receipt is None: return None logs_formatter = compose(functools.partial(map, outputLogFormatter), list) formatters = { 'blockNumber': to_decimal, 'transactionIndex': to_decimal, 'cumulativeGasUsed': to_decimal, 'gasUsed': to_decimal, 'logs': lambda l: logs_formatter(l) if is_array(l) else l, } return { key: formatters.get(key, identity)(value) for key, value in receipt.items() }
[ "def print_receipt(self) -> typing.List[str]:\n lines = []\n euro_total=0\n usd_total=0\n gbp_total=0\n\n for item in self._items.items():\n euro_price = self._get_product_price(item[0]) * item[1]\n usd_price = self.get_price_in_currency(euro_price,\"USD\")\n gbp_price = self.get_price_in_currency(euro_price,\"GBP\")\n\n euro_total += euro_price\n usd_total += usd_price\n gbp_total += gbp_price\n\n euro_price_string = \"€%.2f\" % euro_price\n usd_price_string = \"$%.2f\" % usd_price\n gbp_price_string = \"£%.2f\" % gbp_price\n \n lines.append(item[0] + \" - \" + str(item[1]) + ' - ' + euro_price_string + ' - ' + \\\n usd_price_string + ' - ' + gbp_price_string)\n \n euro_total_str=\"€%.2f\" % euro_total\n usd_total_str=\"$%.2f\" % usd_total\n gbp_total_str=\"£%.2f\" % gbp_total\n\n lines.append(\"Total = \"+euro_total_str+ ' - ' + usd_total_str + ' - ' + gbp_total_str)\n logging.info(str(datetime.now())+': Receipt =' +str(lines))\n return lines", "def convert_trans_to_string(self, transaction):\r\n #note, repr will not work because it doesn't remove curly brackets and colons\r\n record_list = []\r\n for mode, trans in transaction.iteritems():\r\n record_list.append(str(\"mode: \" + mode + \" \"))\r\n for product,quantity in trans.iteritems():\r\n record_list.append(str(product + \":\"))\r\n record_list.append(str(quantity) + \" \")\r\n \r\n record_string = \"\".join(record_list) + \"\\n\"\r\n return record_string", "def print_receipt(self, order):\n car = self.__car_service.get_car_by_license(order[\"License\"])\n customer = self.__customer_service.get_customer_by_kt(order[\"Kt\"])\n\n receipt = \"\"\"\nCustomer\n\n PPN/Kt: {i:<30}\n Name: {name:<30}\n E-Mail: {mail:<30}\n Phone number: {phone:<30}\n Driving license: {license:<30}\n Age: {age:<30}\n Country: {country:<30}\n Address: {address:<30}\n From day: {from_day:<8}\n To day: {to_day:<8}\n\n Car | Per day | Quantity | Total\n -----------------------------------------------------------------------------------\n\n License plate: {car_license:<20}\n Model: {car_model:<20}\n Type: {car_type:<20}\n Class: {car_class:<20}\n Seats: {car_seats:<20}\n 4x4: {car_fwd:<20}\n Transmission: {car_transmission:<20}\n Price of car: {car_price:<20}{car_price:>6} kr.{order_days:^20}{car_order_price:>7} kr.\n Insurance: {order_insurance:<20}{insurance_price:>6} kr.{order_days:^20}{insurance_order_price:>7} kr.\n Penalty: {order_penalty:>57} kr.\n\n Total price: ------------------------------------------------- {order_price} kr.\n\n \"\"\"\n if order[\"Insurance\"] == \"No\":\n i_price = 0\n t_price = 0\n else:\n i_price = int(car[\"Price\"]) * 0.25\n t_price = (int(car[\"Price\"]) * 0.25) * int(order[\"Days\"])\n\n output = receipt.format(i=customer[\"Passport number\"], name=customer[\"Name\"], mail=customer[\"Mail\"],\n address=customer[\"Address\"], country=customer[\"Country\"],\n license=customer[\"license\"],\n age=customer[\"Age\"], phone=customer[\"Phone number\"], car_license=car[\"License\"],\n car_model=car[\"Model\"], car_type=car[\"Type\"], car_class=car[\"Class\"],\n car_seats=car[\"Seats\"], car_fwd=car[\"4x4\"], car_transmission=car[\"Transmission\"],\n car_price=car[\"Price\"], price=order[\"Price\"], order_insurance=order[\"Insurance\"],\n order_days=order[\"Days\"], order_price=order[\"Total price\"],\n car_order_price=(int(car[\"Price\"]) * int(order[\"Days\"])),\n insurance_order_price=round(t_price),\n insurance_price=round(i_price), from_day=order[\"From date\"], to_day=order[\"To date\"],\n 
order_penalty=round(float(order[\"Penalty\"])))\n print(output)", "def output(self):\n \n str_title_len = 50\n str_date_len = 40\n str_purpose_len = 30\n str_price_len = 10\n str_payer_len = 20\n #str_comment_len =\n \n if len(self.title) > (str_title_len - 2):\n out_title = self.title[:str_title_len - 2] + \" |\"\n else:\n out_title = self.title + (\" \" * (str_title_len - len(self.title) - 2)) + \" |\"\n \n # if date is presented with <datetime> object, then\n # then output it in format %d.%m.%y (31.12.99)\n if type(self.date) is datetime.datetime:\n out_date = \" \" + datetime.datetime.strftime(\"%d.%m.%y\") + \" |\"\n # or output as string otherwise\n else:\n if len(self.date) > (str_date_len - 4):\n out_date = \" \" + self.date[:str_date_len - 4] + \" |\"\n else:\n out_date = \" \" + self.date + (\" \" * (str_date_len - len(self.date) - 4)) + \" |\"\n \n if len(self.purpose) > (str_purpose_len - 4):\n out_purpose = \" \" + self.purpose[:str_purpose_len - 4] + \" |\"\n else:\n out_purpose = \" \" + self.purpose + (\" \" * (str_purpose_len - len(self.purpose) - 4)) + \" |\"\n \n # enormous sums aren't supported (over 9999999 at the moment)\n if len(str(self.price)) > (str_price_len - 4):\n raise Exception\n out_price = (' ' * (str_price_len - len(str(self.price)) - 4) ) + str(self.price) + ' |'\n \n if len(self.payer) > (str_payer_len - 2):\n out_payer = \" \" + self.payer[:str_payer_len - 2]\n else:\n out_payer = \" \" + self.payer + (\" \" * (str_payer_len - len(self.payer) - 2))\n \n out_line = out_title + out_date + out_purpose + out_price + out_payer\n return out_line", "def printPayment(self):\n print self.output()", "def receipt_text(self, **kw):\n return self._text(self._receipt_template, **kw)", "def receipt_body(last_refreshed,latest_close,recent_high, recent_low, recent_52high,recent_52low):\n print(\"-------------------------\")\n print(f\"LATEST DAY: {last_refreshed}\")\n print(f\"LATEST CLOSE: {to_usd(float(latest_close))}\")\n print(f\"RECENT HIGH: {to_usd(float(recent_high))}\")\n print(f\"RECENT LOW:{to_usd(float(recent_low))}\")\n print(f\"52 WEEK HIGH:{to_usd(float(recent_52high))}\")\n print(f\"52 WEEK LOW:{to_usd(float(recent_52low))}\")\n print(\"-------------------------\")", "def printTransactionReceipt(rcp):\n if not LOGGER.isEnabledFor(20):\n return\n addr = ''\n if rcp['contractAddress']:\n addr = '\\n address: %s' % rcp['contractAddress']\n LOGGER.info('transaction info:\\n'\n ' block: %d\\n'\n ' block hash: %s\\n'\n ' tx hash: %s\\n'\n ' gas used: %d\\n'\n ' status: %s - %s%s'\n '' % (rcp['blockNumber'], str(rcp['blockHash']),\n str(rcp['transactionHash']),\n rcp['gasUsed'], rcp['status'],\n 'OK' if rcp['status'] == '0x1' else 'REVERTED',\n addr))", "def format_coin_output(coin):\n coin_output1 = \"Grabbing latest data for *\" + coin['name'] + \"*\\n\"\n coin_output2 = \"```{:20s}\\t${:.2f}\\n\".format(\"Price USD\",float(coin['price_usd']))\n coin_output3 = \"{:20s}\\t{:.8f}\\n\".format(\"Price BTC\",float(coin['price_btc']))\n coin_output4 = \"{:20s}\\t${:.2f}\\n\".format(\"Market Cap\",float(coin['market_cap_usd']))\n coin_output5 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 1hr\",float(coin['percent_change_1h']))\n coin_output6 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 24hr\",float(coin['percent_change_24h']))\n coin_output7 = \"{:20s}\\t{:.2f}%\\n```\".format(\"Change 7d\",float(coin['percent_change_7d']))\n return (coin_output1+coin_output2+coin_output3+coin_output4+coin_output5+coin_output6+coin_output7)", "def pay_formatter(self, pay_item):\n return {\n 
\"payment_id\": pay_item[0],\n \"amount_paid\": pay_item[1],\n \"payment_info\": pay_item[2],\n \"approved\": pay_item[3],\n \"pay_date\": pay_item[4],\n \"loan_id\": pay_item[5],\n \"farmer_id\": pay_item[6]\n }", "def _get_formatted_output(self, expected, actual):\n msg = ''\n mismatch_format = '{}: Expected {} Actual {}. '\n if actual is None:\n raise BTUtilsError('None is not expected.')\n for key in expected.keys():\n if expected[key] != actual[key]:\n msg += mismatch_format.format(key, expected[key], actual[key])\n return msg", "def invoice(self):\r\n result ='\\n'.join([f'{product.name} - {product.price}' for product in self.products])\r\n result += f'\\nThe total price is {self.total_price}! \\nThe delivery will be in {self.client.address}!'\r\n return result", "def formatText(self,exchange_group):\n text = []\n try:\n for exchange,val in exchange_group.items():\n if len(exchange.split()) >= 4:\n processed = \"\\n\".join([\" \".join(exchange.split()[:2]),\" \".join(exchange.split()[2:])])\n text.append(\"- {0} {1} of {2}\".format(val,self.parser.UNITS[exchange],processed))\n else: \n text.append(\"- {0} {1} of {2}\".format(val,self.parser.UNITS[exchange],exchange))\n except AttributeError:\n if len(exchange_group) >= 35:\n processed = \",\\n\".join([exchange_group.split(\",\")[0],exchange_group.split(\",\")[1]])\n text.append(processed)\n else:\n text.append(exchange_group)\n return \"\\n\".join(text)", "def TransactionDescription(self) -> str:", "def format_tuition(self, data):\n d = u'$%.2f' % data\n return d.replace('.00','')", "def serialize_receipt(receipt: TxReceipt) -> bytes:\n prepared_receipt = prepare_receipt(receipt)\n encoded_receipt = rlp.encode(prepared_receipt)\n\n receipt_type = HexBytes(receipt.get(\"type\", 0))\n if receipt_type == HexBytes(0):\n return encoded_receipt\n\n buffer = HexBytes(receipt_type) + encoded_receipt\n return rlp.encode(buffer)", "def __str__(self): # get for basket/cart\n if self.currency_type == 'eur':\n return \"Items in basket: {} \\nBasket Total: €{} \\n\".format(', '.join(self.total_products), self.amount)\n elif self.currency_type == 'usd':\n return \"Items in basket: {} \\nBasket Total: ${} \\n\".format(', '.join(self.total_products), self.amount)\n elif self.currency_type == 'gbp':\n return \"Items in basket: {} \\nBasket Total: £{} \\n\".format(', '.join(self.total_products), self.amount)", "def outputBlockFormatter(block):\n\n # Transform to number\n block[\"gasLimit\"] = to_decimal(block[\"gasLimit\"])\n block[\"gasUsed\"] = to_decimal(block[\"gasUsed\"])\n block[\"size\"] = to_decimal(block[\"size\"])\n block[\"timestamp\"] = to_decimal(block[\"timestamp\"])\n\n if block.get(\"number\"):\n block[\"number\"] = to_decimal(block[\"number\"])\n\n block[\"difficulty\"] = to_decimal(block[\"difficulty\"])\n block[\"totalDifficulty\"] = to_decimal(block[\"totalDifficulty\"])\n\n if is_array(block.get(\"transactions\")):\n for item in block[\"transactions\"]:\n if not is_string(item):\n item = output_transaction_formatter(item)\n\n return block", "def formatResult(self, result):\r\n return str(result)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats the output of a block to its proper values
def outputBlockFormatter(block): # Transform to number block["gasLimit"] = to_decimal(block["gasLimit"]) block["gasUsed"] = to_decimal(block["gasUsed"]) block["size"] = to_decimal(block["size"]) block["timestamp"] = to_decimal(block["timestamp"]) if block.get("number"): block["number"] = to_decimal(block["number"]) block["difficulty"] = to_decimal(block["difficulty"]) block["totalDifficulty"] = to_decimal(block["totalDifficulty"]) if is_array(block.get("transactions")): for item in block["transactions"]: if not is_string(item): item = output_transaction_formatter(item) return block
[ "def pretty_data_block(data_block):\n return '\\nCoding: {0[0]}\\n' \\\n 'Function: {0[1]}\\n' \\\n 'Description: {0[2]}\\n' \\\n 'Value: {0[3]} {0[4]}\\n' \\\n 'Subunit: {0[5]}\\n' \\\n 'Tariff: {0[6]}\\n' \\\n 'Storage Number: {0[7]}\\n'.format(data_block)", "def output(self, params=None):\n result = self.pretty_print(params=params)\n result = PythonBlock(result, filename=self.input.filename)\n return result", "def printBlock(self):\n print self.start, self.end", "def _block(cls, name, val):\n res = ['begin ' + name]\n res += cls._value(val)\n res += ['end ' + name]\n return res", "def __str__(self):\n block_string = ''.join([\n \"index: \" + str(self.index) + '\\n',\n \"timestamp: \" + str(self.timestamp) + '\\n',\n \"difficulty: \" + str(self.difficulty) + '\\n',\n \"nonce: \" + str(self.nonce) + '\\n',\n \"transaction: \" + str(self.transaction) + '\\n',\n \"previousHash: \" + self.previousHash + '\\n',\n \"hash: \" + self.hash + '\\n'\n ])\n return block_string", "def format_blocks(self):\n\n block_text = []\n for el, text in self._block_text.items():\n self.soft_break(el, text)\n content = ''.join(text)\n if content:\n block_text.append((content, self.additional_context + self.construct_selector(el)))\n return block_text", "def block_response_to_string(block_response):\n string = ''\n if block_response.get('Block', {}).get('IonText') is not None:\n string += 'Block: ' + value_holder_to_string(block_response['Block']['IonText']) + ', '\n\n if block_response.get('Proof', {}).get('IonText') is not None:\n string += 'Proof: ' + value_holder_to_string(block_response['Proof']['IonText'])\n\n return '{' + string + '}'", "def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def format_coin_output(coin):\n coin_output1 = \"Grabbing latest data for *\" + coin['name'] + \"*\\n\"\n coin_output2 = \"```{:20s}\\t${:.2f}\\n\".format(\"Price USD\",float(coin['price_usd']))\n coin_output3 = \"{:20s}\\t{:.8f}\\n\".format(\"Price BTC\",float(coin['price_btc']))\n coin_output4 = \"{:20s}\\t${:.2f}\\n\".format(\"Market Cap\",float(coin['market_cap_usd']))\n coin_output5 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 1hr\",float(coin['percent_change_1h']))\n coin_output6 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 24hr\",float(coin['percent_change_24h']))\n coin_output7 = \"{:20s}\\t{:.2f}%\\n```\".format(\"Change 7d\",float(coin['percent_change_7d']))\n return (coin_output1+coin_output2+coin_output3+coin_output4+coin_output5+coin_output6+coin_output7)", "def outputLogFormatter(log):\n if log.get(\"blockNumber\"):\n log[\"blockNumber\"] = to_decimal(log[\"blockNumber\"])\n if log.get(\"transactionIndex\"):\n log[\"transactionIndex\"] = to_decimal(log[\"transactionIndex\"])\n if log.get(\"logIndex\"):\n log[\"logIndex\"] = to_decimal(log[\"logIndex\"])\n\n return log", "def process_block(self):\n if self.verbose: print(\"sds011 process block\")\n if self.verbose: print([hex(x) for x in self.block])\n check_sum = 0\n for i in range(2,8):\n check_sum = check_sum + self.block[i]\n check_sum = check_sum & 0xff\n if self.verbose: print(\"Checksum:\",hex(check_sum))\n if check_sum!=self.block[8]:\n message = \"Rcv:\" + hex(self.block[8]) + \" Cal:\" + hex(check_sum)\n self.display.error(message)\n return\n ppm10 = 
(self.block[4]+256*self.block[5])/10\n ppm2_5 = (self.block[2]+256*self.block[3])/10\n self.display.new_readings(ppm10,ppm2_5)", "def dump(self):\n result = super(QuorumTransactionBlock, self).dump()\n result['BlockNumber'] = self.BlockNumber\n\n return result", "def format_output(self, rendered_widgets):\n return \"%s&nbsp;%s\" % (rendered_widgets[0], rendered_widgets[1])", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def format(self, format_string, module=None, param_dict=None,\n force_composite=False, attr_getter=None):\n\n def set_param(param, value, key, block, format=''):\n \"\"\"\n Converts a placeholder to a string value.\n We fix python 2 unicode issues and use string.format()\n to ensure that formatting is applied correctly\n \"\"\"\n if self.python2 and isinstance(param, str):\n param = param.decode('utf-8')\n # '', None, and False are ignored\n # numbers like 0 and 0.0 are not.\n if not (param in ['', None] or param is False):\n if format.startswith(':'):\n # if a parameter has been set to be formatted as a numeric\n # type then we see if we can coerce it to be. This allows\n # the user to format types that normally would not be\n # allowed eg '123' it also allows {:d} to be used as a\n # shorthand for {:.0f}. 
If the parameter cannot be\n # successfully converted then the format is removed.\n try:\n if 'f' in format:\n param = float(param)\n if 'd' in format:\n param = int(float(param))\n except ValueError:\n value = u'{%s}' % key\n value = value.format(**{key: param})\n block.add(value)\n # If not_zero block command is used we do not want to mark this\n # block as valid if the parameter is zero.\n # we do of course want to et the parameter in case the block is\n # valid via another route, eg second parameter\n try:\n if block.block_config.not_zero and float(param) == 0:\n return\n except ValueError:\n pass\n block.mark_valid()\n\n # fix python 2 unicode issues\n if self.python2 and isinstance(format_string, str):\n format_string = format_string.decode('utf-8')\n\n if param_dict is None:\n param_dict = {}\n\n block = Block(param_dict, module)\n\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n value = token.group(0)\n if token.group('block_start'):\n # Create new block\n new_block = Block(param_dict, module, block)\n block.add(new_block)\n block = new_block\n elif token.group('block_end'):\n # Close block setting any valid state as needed\n # and return to parent block to continue\n block.set_valid_state()\n if not block.parent:\n raise Exception('Too many `]`')\n block = block.parent\n elif token.group('switch'):\n # a new option has been created\n block.set_valid_state()\n block.switch()\n elif token.group('placeholder'):\n # Found a {placeholder}\n key = token.group('key')\n if key in param_dict:\n # was a supplied parameter\n param = param_dict.get(key)\n if isinstance(param, Composite):\n # supplied parameter is a composite\n if param.get_content():\n block.add(param.copy())\n block.mark_valid()\n else:\n format = token.group('format')\n set_param(param, value, key, block, format)\n elif module and hasattr(module, key):\n # attribute of the module\n param = getattr(module, key)\n if not hasattr(param, '__call__'):\n set_param(param, value, key, block)\n else:\n block.add(value)\n elif attr_getter:\n # get value from attr_getter function\n param = attr_getter(key)\n set_param(param, value, key, block)\n else:\n # substitution not found so add as a literal\n block.add(value)\n elif token.group('literal'):\n block.add(value)\n elif token.group('lost_brace'):\n # due to how parsing happens we can get a lonesome }\n # eg in format_string '{{something}' this fixes that issue\n block.add(value)\n elif token.group('command'):\n # a block command has been found\n block.set_commands(token.group('command'))\n elif token.group('escaped'):\n # escaped characters add unescaped values\n if value[0] in ['\\\\', '{', '}']:\n value = value[1:]\n block.add(value)\n\n if block.parent:\n raise Exception('Block not closed')\n\n # This is the main block if none of the sections are valid use the last\n # one for situations like '{placeholder}|Nothing'\n if not block.valid_blocks:\n block.mark_valid()\n output = block.show()\n\n # post format\n # swap color names to values\n for item in output:\n # ignore empty items\n if not item.get('full_text') and not item.get('separator'):\n continue\n # colors\n color_this = item.get('color')\n if color_this and hasattr(color_this, 'none_setting'):\n # NoneColor so remove.\n del item['color']\n elif color_this and color_this[0] != '#':\n color_name = 'color_%s' % color_this\n threshold_color_name = 'color_threshold_%s' % color_this\n # substitute color\n color_this = (\n getattr(module, color_name, None) or\n getattr(module, 
threshold_color_name, None) or\n getattr(module.py3, color_name.upper(), None)\n )\n if color_this:\n item['color'] = color_this\n else:\n del item['color']\n output = Composite(output).simplify()\n # if only text then we can become a string\n if not force_composite:\n if len(output) == 0:\n return ''\n elif (len(output) == 1 and list(output[0].keys()) == ['full_text']):\n output = output[0]['full_text']\n\n return output", "def block(cell):\n value=[0,0,cell[2]]\n for i in xrange(2):\n if cell[i] < 3:\n value[i] = 1\n if cell[i] >= 3 and cell[i] < 6:\n value[i] = 2\n if cell[i] >= 6:\n value[i] = 3\n return (\"block\",value[0],value[1],value[2])", "def _print_block_states(self):\n for block in self.blocks:\n print '{0}: {1}'.format(block.name, block.active)", "def convert_textfile(self, input_textfile, output_textfile, block_number):\n input_textfile = './src/data/qvalue_files/' + input_textfile + '.txt'\n\n output_textfile = './src/data/qvalue_files/' + output_textfile + '.txt'\n \"\"\"\n block1\n \"\"\"\n box_conversion_map_1 = {\n 'D1': 'A1',\n 'D2': 'A2',\n 'D3': 'A3',\n 'D4': 'A4',\n 'D5': 'A5',\n 'D6': 'A6',\n 'D7': 'A7',\n 'D8': 'A8',\n 'D9': 'A9'\n }\n \"\"\"\n block2\n \"\"\"\n box_conversion_map_2 = {\n 'D1': 'A7',\n 'D2': 'A8',\n 'D3': 'A9',\n 'D4': 'A10',\n 'D5': 'A11',\n 'D6': 'A12',\n 'D7': 'A13',\n 'D8': 'A14',\n 'D9': 'A15'\n }\n \"\"\"\n block3\n \"\"\"\n box_conversion_map_3 = {\n 'U1': 'B7',\n 'U2': 'B8',\n 'U3': 'B9',\n 'U4': 'B10',\n 'U5': 'B11',\n 'U6': 'B12',\n 'U7': 'B13',\n 'U8': 'B14',\n 'U9': 'B15'\n }\n \"\"\"\n block0\n \"\"\"\n box_conversion_map_0 = {\n 'U1': 'B1',\n 'U2': 'B2',\n 'U3': 'B3',\n 'U4': 'B4',\n 'U5': 'B5',\n 'U6': 'B6',\n 'U7': 'B7',\n 'U8': 'B8',\n 'U9': 'B9'\n }\n box_maps_dict = {0: box_conversion_map_0, 1: box_conversion_map_1, 2: box_conversion_map_2,\n 3: box_conversion_map_3}\n box_conversion_map = box_maps_dict[block_number]\n\n f_read = open(input_textfile, 'r')\n f_write = open(output_textfile, 'w+')\n\n for i in f_read.read().split('\\n'):\n print(i, \"before\")\n for key in box_conversion_map.keys():\n i = i.replace(key + 'x', box_conversion_map[key] + 'x')\n i = i.replace(key + 'z', box_conversion_map[key] + 'z')\n i = i.replace(key + 'N', box_conversion_map[key] + 'N')\n i = i.replace(key + 'E', box_conversion_map[key] + 'E')\n i = i.replace(key + 'W', box_conversion_map[key] + 'W')\n i = i.replace(key + 'S', box_conversion_map[key] + 'S')\n i = i.replace(key + '|', box_conversion_map[key] + '|')\n print(i, \"after\")\n f_write.write(i + '\\n')\n f_read.close()\n f_write.close()", "def format_sampler(val):\n return val" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats the output of a log
def outputLogFormatter(log): if log.get("blockNumber"): log["blockNumber"] = to_decimal(log["blockNumber"]) if log.get("transactionIndex"): log["transactionIndex"] = to_decimal(log["transactionIndex"]) if log.get("logIndex"): log["logIndex"] = to_decimal(log["logIndex"]) return log
[ "def format(self, record, *args, **kwargs):\r\n return logging.Formatter.format(\r\n self, record, *args, **kwargs).replace('\\n', '\\n' + ' ' * 8)", "def format(self, record):\n\t\tif self.color:\n\t\t\ttry:\n\t\t\t\tcat = getattr(record, self.CATEGORY, None)\n\t\t\t\tif not cat:\n\t\t\t\t\tif record.levelname == 'WARN': cat = LOG_WARN\n\t\t\t\t\telif record.levelname == 'ERROR': cat = LOG_ERROR\n\t\t\t\t\telif record.levelname == 'DEBUG': cat = LOG_DEBUG\n\t\t\t\tif cat:\n\t\t\t\t\tcat = cat.lower()\n\t\t\t\t\trecord = copy.copy(record)\n\t\t\t\t\tindexes = getattr(record, self.ARG_INDEX, None)\n\t\t\t\t\tif indexes == None:\n\t\t\t\t\t\trecord.msg = self.colorCategoryToEscapeSequence(cat)+record.msg+self.colorCategoryToEscapeSequence(LOG_END)\n\t\t\t\t\telse:\n\t\t\t\t\t\targs = list(record.args)\n\t\t\t\t\t\tfor index in indexes: args[index] = self.formatArg(cat, args[index])\n\t\t\t\t\t\trecord.args = tuple(args)\n\t\t\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.getLogger('pysys.utils.logutils').debug('Failed to format log message \"%s\": %s'%(record.msg, repr(e)))\n\n\t\treturn super(ColorLogFormatter, self).format(record)", "def format_log(request, message):\n now = datetime.now().replace(microsecond=0)\n log = MESSAGE_LOG_FORMAT % dict(request.META, MESSAGE=message, TIME=now)\n return log + \"\\n\"", "def format(self, record):\n record.message = record.getMessage()\n if self.usesTime():\n record.asctime = self.formatTime(record, self.datefmt)\n try:\n s = self._fmt % record.__dict__\n except UnicodeDecodeError as e:\n # Issue 25664. The logger name may be Unicode. Try again ...\n try:\n record.name = record.name.decode('utf-8')\n s = self._fmt % record.__dict__\n except UnicodeDecodeError:\n raise e\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n try:\n s = s + record.exc_text\n except UnicodeError:\n # Sometimes filenames have non-ASCII chars, which can lead\n # to errors when s is Unicode and record.exc_text is str\n # See issue 8924.\n # We also use replace for when there are multiple\n # encodings, e.g. UTF-8 for the filesystem and latin-1\n # for a script. 
See issue 13232.\n s = s + record.exc_text.decode(sys.path.getfilesystemencoding(),\n 'replace')\n if re.findall(r\"u'\\\\u\", s):\n s = s.encode('utf-8').decode('unicode_escape')\n\n return s", "def format_result(self):\n return ('{}\\n\\n{}'.format(\n LogParser.format_dict(LogParser.order_dict(self.urls)[:3]),\n LogParser.format_dict(LogParser.order_dict(self.status_codes))))", "def format(self, record):\n message = record.getMessage()\n asctime = self.formatTime(record, self.datefmt)\n name = yellow(record.name)\n\n s = \"%(timestamp)s %(levelname)s %(name)s \" % {\n \"timestamp\": green(\"%s,%03d\" % (asctime, record.msecs), bold=True),\n \"levelname\": self.LEVELS[record.levelname],\n \"name\": name,\n }\n\n if \"\\n\" in message:\n indent_length = len(re_color_codes.sub(\"\", s))\n message = message.replace(\"\\n\", \"\\n\" + \" \" * indent_length)\n\n s += message\n return s", "def test_format_log_line(log_line):\n assert(format_log_line(log_line) == \"2012-09-13 16:04:22 DEBUG SID:34523 \"\n \"BID:1329 RID:65d33 'Starting new session'\")\n assert(format_log_line(log_line) != \"\")", "def logger_format(self) -> str:\n\t\treturn ('%(asctime) -19s | %(levelname) -8s | %(threadName) -10s | '\n\t\t\t\t'%(funcName) -16s | %(message)s')", "def format(self, record: logging.LogRecord = None) -> str:\n # s = super().format(record)\n s = None\n e = {}\n e['id'] = uuid.uuid4().hex\n e['message'] = record.getMessage()\n # log.warning('record.message: %r', record.getMessage())\n # log.warning('record.args: %r', record.args)\n e['created'] = record.created\n e['priority'] = record.levelname\n e['args'] = record.args\n e['source_code'] = {}\n e['source_code']['pathname'] = record.pathname\n e['source_code']['funcName'] = record.funcName\n e['source_code']['lineno'] = record.lineno\n ctx = record.args.get(PIPELINE_CONTEXT_KEY, None)\n if ctx:\n e[PIPELINE_CONTEXT_KEY] = ctx.toDict()\n # use array enclosure a[] to mainain the log file\n # yaml compliant as new events are appended\n # - event1:\n # - event2:\n # - ...\n a = [e]\n s = yaml.dump(a)\n return s", "def format(self, record):\n record.message = indent_string(record.getMessage())\n if \"%(asctime)\" in self._fmt:\n record.asctime = self.formatTime(record, self.datefmt)\n s = self._fmt % record.__dict__\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n s = \"{0} Exception:\\n {1}\".format(s, indent_string(record.exc_text))\n return s", "def _log_format_default(self):\n return '%(message)s'", "def formatLogs(logs,format):\n formattedLogs=[]\n \n if(format.__eq__(\"json\")):\n for log in logs:\n formattedLogs.append(json.dumps(dict(log)))\n return formattedLogs\n elif(format.__eq__(\"xml\")):\n for log in logs:\n formattedLogs.append(dict2xml.dict2xml(dict(log)))\n return formattedLogs\n else:\n return logs", "def get_formatted_task_log(self):\n try:\n log = requests.get(self.gs_base_url + \"/out.log\").content\n except:\n return [f\"####-##-## ##:##:## Task ID: {self.name}\\n\"]\n return (f\"####-##-## ##:##:## Task ID: {self.name}\\n\" + log.decode('utf-8')).splitlines()", "def logWrite(self):\n strContainer = \"\"\n for item in self.strbuffer:\n if self.TSflag:\n strContainer += \"[\" + self.getFormattedTime(item[1]) + \"] \" + item[0]\n else:\n strContainer += item[0]\n if item[2]:\n self.TSflag = True\n else:\n self.TSflag = 
False\n with open(self.LOGPATH + self.logfile, \"a+\") as glogfile:\n glogfile.write(strContainer)\n self.strbuffer = []", "def _format_msg(self, format_str, *args):\r\n return u\"{0} - - [{1}] {2}\\n\".format(\r\n self.client_address[0],\r\n self.log_date_time_string(),\r\n format_str % args\r\n )", "def _format_msg(self, format_str, *args):\n if not args:\n format_str = six.moves.urllib.parse.unquote(format_str)\n return \"{} - - [{}] {}\\n\".format(\n self.client_address[0],\n self.log_date_time_string(),\n format_str % args\n )", "def log_format_info(event_str, data = {}):\n\tcheck_type(event_str, StringType)\n\tcheck_type(data, DictType)\n\n\tinfo = []\n\tfor k in data:\n\t\tinfo.append('{0}: {1}'.format(k, data[k]))\n\treturn '{0}. Info: {1}'.format(event_str, ', '.join(info))", "def log_message(self, format, *args):\n if self.headers:\n xff = self.headers.getheader('X-Forwarded-For', '-')\n xgo = self.headers.getheader('X-Grafana-Org-Id', '-')\n ua = self.headers.getheader('User-Agent', '-')\n\n logging.info(\"%s - - [%s] %s [X-Forwarded-For: %s, X-Grafana-Org-Id: %s, User-Agent: %s]\" %\n (self.client_address[0], self.log_date_time_string(), format % args, xff, xgo, ua))\n else:\n logging.info(\"%s - - [%s] %s\" %\n (self.client_address[0], self.log_date_time_string(), format % args))", "def pretty_end_log(title):\n output = '>' * 10 + ' ' + title + ' ' + '<' * 10 + '\\n\\n'\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats the input of a whisper post and converts all values to HEX
def inputPostFormatter(post): post["ttl"] = from_decimal(post["ttl"]) post["workToProve"] = from_decimal(post.get("workToProve", 0)) post["priority"] = from_decimal(post["priority"]) if not is_array(post.get("topics")): post["topics"] = [post["topics"]] if post.get("topics") else [] post["topics"] = [topic if is_0x_prefixed(topic) else encode_hex(topic) for topic in post["topics"]] return post
[ "def outputPostFormatter(post):\n\n post[\"expiry\"] = to_decimal(post[\"expiry\"])\n post[\"sent\"] = to_decimal(post[\"sent\"])\n post[\"ttl\"] = to_decimal(post[\"ttl\"])\n post[\"workProved\"] = to_decimal(post[\"workProved\"])\n\n if not post.get(\"topics\"):\n post[\"topics\"] = []\n\n post[\"topics\"] = [decode_hex(topic) for topic in post[\"topics\"]]\n\n return post", "def _encode_post(self):\n\t\tpost_data = self.config.get('post_data')\n\t\tif post_data is not None:\n\t\t\tpost_data = self._encode_data(post_data, self.config.get('post_data_bits'))\n\t\t\tif self.config.get('post'):\n\t\t\t\tpost_pulse = self._encode_tuple(self.config['post'])\n\t\t\t\treturn post_pulse + post_data\n\t\t\telse:\n\t\t\t\treturn post_data", "def _stata_hex_format(self, value):\n return self._convert_hex(float(value).hex())", "def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])", "def f_hex_to_ascii(self):\n return self.input.decode('hex')", "def hex_str (self):\n return \"#%02X%02X%02X\"%(self.r, self.g, self.b)", "def _get_hexesc(data: str) -> str:\n temp = []\n for char in data:\n temp.append(hex(ord(char)).replace(\"0x\", \"\\\\\\\\x\"))\n return \"\".join(temp)", "def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")", "def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash", "def _get_hexplain(data: str) -> str:\n temp = []\n for char in data:\n temp.append(hex(ord(char)).replace(\"0x\", \"\"))\n return \"\".join(temp)", "def phex(value, expected):\n return f\"{value:#0{expected}x}\"", "def pretty_hebrew(val):\n return 'font-size:20px; font-family: Times New Roman; text-align: right; max-width: 500px'", "def hex_format(value):\n\t\treturn \"#{hex}\".format(\n\t\t\t\t\thex=value[:-1]\n\t\t\t\t) if value[-1] == \"#\" else value", "def format_data(self, data):\n pass", "def preprocess_hex_chars(self, text) :\n preprocessed_text = ''\n\n i = 0\n while i < len(text) :\n if '\\\\x' == text[i:i+2] :\n c = int(text[i+2:i+4], base=16)\n preprocessed_text += chr(c)\n i += 4\n else :\n preprocessed_text += text[i]\n i += 1\n\n return preprocessed_text", "def reformat_epc(epc_barr):\n # Convert the string of decimal data to list of decimal data\n epc_barr = list(epc_barr)\n\n # Initialize variable epc_hex with an empty string\n epc_hex = ''\n\n # Convert each member in list epc_bar to hexadecimal\n # and append it to variable epc_hex\n for epc in epc_barr:\n epc_hex += '{:02X}'.format(epc)\n\n # Return the value stored in variable epc_hex\n return epc_hex", "def toHex(self):\n \n t=self.boolVals[:]\n t.reverse()\n \n string=str(self)\n \n \n string=hex(int(string,2))\n string=string[2:]\n\n d=ceil(self.n/4)-len(string)\n string=d*\"0\"+string\n return string", "def wkb_hex(self): # -> str:\n ...", "def __format__(self, _format):\n if _format.upper() == \"WIF\":\n return base58CheckEncode(0x80, self._hex)\n elif _format.upper() == \"ENCWIF\":\n return base58encode(self._hex)\n elif _format.upper() == \"BTC\":\n return base58CheckEncode(0x00, self._hex)\n elif _format.upper() in known_prefixes:\n return _format.upper() + str(self)\n else:\n log.warn(\"Format %s unkown. You've been warned!\\n\" % _format)\n return _format.upper() + str(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats the output of a received post message
def outputPostFormatter(post): post["expiry"] = to_decimal(post["expiry"]) post["sent"] = to_decimal(post["sent"]) post["ttl"] = to_decimal(post["ttl"]) post["workProved"] = to_decimal(post["workProved"]) if not post.get("topics"): post["topics"] = [] post["topics"] = [decode_hex(topic) for topic in post["topics"]] return post
[ "def inputPostFormatter(post):\n\n post[\"ttl\"] = from_decimal(post[\"ttl\"])\n post[\"workToProve\"] = from_decimal(post.get(\"workToProve\", 0))\n post[\"priority\"] = from_decimal(post[\"priority\"])\n\n if not is_array(post.get(\"topics\")):\n post[\"topics\"] = [post[\"topics\"]] if post.get(\"topics\") else []\n\n post[\"topics\"] = [topic if is_0x_prefixed(topic) else encode_hex(topic)\n for topic in post[\"topics\"]]\n\n return post", "def render_post(response, post):\n response.out.write('<b>' + post.subject + '</b><br>')\n response.out.write(post.content)", "def render_post(response, post):\n\n response.out.write('<b>' + post.subject + '</b><br>')\n response.out.write(post.content)", "def output_message(message_bytes):\n print('\\1\\1\\1\\1') # MMDF delimiter\n # Todo: figure out actual encoding\n print(decode_body_part(message_bytes, 'utf-8'))\n print('\\1\\1\\1\\1') # MMDF delimiter", "def _encode_post(self):\n\t\tpost_data = self.config.get('post_data')\n\t\tif post_data is not None:\n\t\t\tpost_data = self._encode_data(post_data, self.config.get('post_data_bits'))\n\t\t\tif self.config.get('post'):\n\t\t\t\tpost_pulse = self._encode_tuple(self.config['post'])\n\t\t\t\treturn post_pulse + post_data\n\t\t\telse:\n\t\t\t\treturn post_data", "def _PackOutput(msg):\n print msg", "def printPost(self, id):\n enc = getpreferredencoding()\n output = self._extractPost(id)['formatted_text']\n print output.encode(enc)", "def create_output(self, messages):", "def post(self):\n text = 'HELLO from socnet API Server!'\n return push_to_mattermost(text)", "def my_form_post():\n all_output = feedline(request.form['text'],True)\n all_output = re.sub('\\n','<br>',all_output) # Change \\n to <br>\n html = \"\"\"\n <html>\n <head>\n <title>MyPython WebApp</title>\n </head>\n <body>\n <div id=\"container\">\n <div class=\"title\">\n <h1>MyPython WebApp</h1>\n </div>\n </div>\n <p>%s\n </p>\n <div id=\"content\">\n <form action=\".\" method=\"POST\">\n <input type=\"text\" name=\"text\">\n <input type=\"submit\" name=\"my-form\" value=\"Send\">\n </form>\n </div>\n </body>\n </html>\"\"\" %(all_output)\n return html", "def post(self):\n r = request.get_json()['text']\n # Recupero dalla richiesta il JSON che mi è stato inviato\n # e salvo il valore contenuto \n # sotto la chiave text su una variabile r.\n # In reguito ritorno r in formato stringa.\n # In quanto il valore di ritorno deve essere una stringa.\n return str(r)", "def _post_message_hook(self, f):\n f.write(linesep + b'\\001\\001\\001\\001' + linesep)", "def format_response(response):\n start_line = _format_status_line(response.status, response.reason)\n msg = _format_message(start_line, response.header, response.body)\n return msg", "def create_series_msg(self, post_url: str) -> str:\n return self._render(\"series-pm\", post_url=post_url)", "def format_response_for_display(self, response, case):\n out_bits = []\n parsed = self.parse_response(response, case)\n\n request = parsed['request']\n out_bits.append(request['request_line'])\n for header, value in request['headers'].items():\n out_bits.append('%s: %s' % (header, value))\n if request['body']:\n out_bits.extend(('', request['body']))\n\n out_bits.extend([''] * 2)\n\n response = parsed['response']\n out_bits.append(response['response_line'])\n for header, value in response['headers'].items():\n out_bits.append('%s: %s' % (header, value))\n if response['body']:\n out_bits.extend(('', response['body']))\n\n return '\\n'.join(out_bits)", "def format_tmux_resp(std_resp):\n return 
'\\n'.join(\n [\n '\\n'.join(prepend_tab(std_resp.stdout)),\n click.style('\\n'.join(prepend_tab(std_resp.stderr)), fg='red'),\n ]\n )", "def output_raw_message(text):\n database.messages_output_queue.put(text)", "def output(self) -> Dict:\n broker = self._find_broker()\n final_output = ';'.join([self.content['id_message'], broker]) + '\\n'\n\n phone_number = self.content['ddd'] + self.content['phone']\n\n sent_time_details = self.content['sent_time'].split(':')\n sent_time = time(int(sent_time_details[0]), int(sent_time_details[1]), int(sent_time_details[2]))\n\n answer = {\n 'output': final_output,\n 'phone_number': phone_number,\n 'sent_time': sent_time,\n }\n\n return answer", "def __format_inner(self, post_inner):\n text = ''\n p_tags = post_inner.find_all(\"p\")\n for p in p_tags:\n text += self.__format_text(p)\n data_inner = {\n \"text\": text\n }\n\n return data_inner" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that the import of cities from a local file works fine
def test_csv_import_city(self): from django.contrib.messages import get_messages path = reverse("import-csv") user = mixer.blend(User, is_staff=True, is_superuser=True) file = open("city.csv") client = Client() client.force_login(user) r = client.post(path, {"title": "city", "csv_file": file}) messages = list(get_messages(r.wsgi_request)) assert r.status_code == 200 assert len(messages) == 1 assert str(messages[0]) == "Successfully Uploaded!"
[ "def load_cities():\n return os.listdir(DATA_DIR)", "def import_localities_fromfile(self, filePath=None, separator=';'):\n if not self.portal_type == 'UrbanTool':\n raise Unauthorized(\"This script must be called on portal_urban!\")\n if not hasattr(self, 'streets'):\n raise AttributeError(\"The streets folder does not exist in portal_urban!\")\n\n #if no filePath is defined, take the localities.txt file stored here\n if not filePath:\n filePath = ext.__path__[0] + '/localities.txt'\n if not os.path.isfile(filePath):\n raise ImportError(\"The localities.txt file does not exist in Products.urban.Extensions. Try using your own file by passing it as parameter to this ExternalMethod.\")\n\n file = open(filePath, 'r')\n numberOfRecords = len(file.readlines())\n file.seek(0)\n i = 1\n streetFolder = getattr(self, 'streets')\n for line in file.readlines():\n print \"Importing locality %d of %d\" % (i, numberOfRecords)\n i = i + 1\n city, zipcode, localityName, alsoCalled = line.strip().split(separator)\n cityId = self.plone_utils.normalizeString(city)\n if not hasattr(aq_base(streetFolder), cityId):\n #if the city still does not exist, we create it\n cityObjId = streetFolder.invokeFactory('City', id=cityId, title=city, zipCode=zipcode)\n cityObj = getattr(streetFolder, cityObjId)\n cityObj.reindexObject()\n else:\n cityObj = getattr(streetFolder, cityId)\n try:\n localityId = self.plone_utils.normalizeString(localityName)\n cityObj.invokeFactory('Locality', id=localityId, localityName=localityName, alsoCalled='\\n'.join(alsoCalled.split('|')))\n except BadRequest:\n print (\"The locality with id '%s' already exists!\" % (localityId))\n file.close()\n file.close()", "def GetWorldCities():\n return GetDataFromCsvFile('world_cities.csv')", "def test_import_same_name(self):\r\n\r\n self.assertEqual(Place.objects.all().count(), 0)\r\n assert test_client_login(self.client, username='admin@example.org', password='123') == True\r\n\r\n csv_file = StringIO(\"Donut Mountain,123 Fakey St.,1.0,2.0,http://www.example.org/bs/0\\nDonut Mountain,99 Fakley St.,99.0,22.0,http://www.example.org/bs/1\")\r\n csv_file.name = 'test.csv'\r\n response = self.client.post(self.import_url, {'place_type': '1', 'csv_file': csv_file})\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(Place.objects.all().count(), 2)\r\n\r\n locs = []\r\n for place in Place.objects.filter(normalized_name='DONUT MOUNTAIN').all():\r\n locs.append((place.address, place.location.x, place.location.y, place.url))\r\n\r\n assert ('123 Fakey St.', 2.0, 1.0, 'http://www.example.org/bs/0') in locs\r\n assert ('99 Fakley St.', 22.0, 99.0, 'http://www.example.org/bs/1') in locs", "def load_cities(self, sqlpath=\":memory:\"):\n\n # cities1000.zip should be in this same directory\n file_dir = os.path.dirname(os.path.abspath(__file__))\n file_name = 'cities1000.zip'\n full_zip_path = os.path.join(file_dir, file_name)\n\n # File in zip archive\n member_name = 'cities1000.txt'\n\n with zipfile.ZipFile(full_zip_path, mode='r') as z:\n handle = z.open(member_name)\n\n reader = csv.reader(handle, delimiter='\\t')\n\n # Hack: work around default field size limit in CSV module\n csv.field_size_limit(sys.maxsize)\n\n self.create_sql_db(sqlpath, self.filter_rows(reader))", "def _import_insee_city(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'comsimp2011.csv'))\n city_obj = self.pool.get('insee.city')\n department_obj = self.pool.get('insee.department')\n with 
open(filepath, 'rb') as cityfile:\n reader = csv.DictReader(cityfile)\n for row in reader:\n args = [('dep', '=', row['DEP'])]\n department_ids = department_obj.search(cr, uid, args)\n department_id = department_ids and department_ids[0] or None\n ncc = row['ARTMAJ'] and row['ARTMAJ'].strip(\"()\") + \\\n row['NCC'] or row['NCC']\n nccenr = row['ARTMIN'] and row['ARTMIN'].strip(\"()\") + \\\n row['NCCENR'] or row['NCCENR']\n values = {\n 'cdc': row['CDC'],\n 'cheflieu': row['CHEFLIEU'],\n 'reg': row['REG'],\n 'dep': row['DEP'],\n 'department_id': department_id,\n 'com': row['COM'],\n 'ar': row['AR'],\n 'ct': row['CT'],\n 'tncc': row['TNCC'],\n 'artmaj': row['ARTMAJ'],\n 'ncc': ncc,\n 'artmin': row['ARTMIN'],\n 'nccenr': nccenr,\n }\n city_obj.create(cr, uid, values, context=context)", "def importcities(filename, minpopulation):\n with open(filename, 'rb') as inputfile:\n nonulls = (line.replace('\\0', '') for line in inputfile)\n reader = csv.DictReader(nonulls, fieldnames=cityfilefields,\n delimiter='\\t')\n conn = db.connect('temp.db')\n cur = conn.cursor()\n # create the table for storing the cities\n cur.execute('CREATE TABLE cities(name TEXT NOT NULL)')\n # add a geometry column (table, field, coord system, type, dimensions)\n # 4326 is the EPSG SRID for WGS 84\n cur.execute(\"SELECT AddGeometryColumn('cities', 'geom', 4326, \" +\n \"'POINT', 'XY')\")\n for row in reader:\n citysize = int(row['population'])\n if citysize < minpopulation:\n continue\n cityname = row['asciiname']\n # escape apostrophes in city names\n if re.search(\"'\", cityname):\n cityname = re.sub(\"'\", \"''\", cityname)\n citygeom = \"GeomFromText('POINT(\"\n citygeom += str(row['latitude']) + \" \" + str(row['longitude'])\n citygeom += \")', 4326)\"\n query = \"INSERT INTO cities(name, geom) \"\n query += \"VALUES ('\" + cityname + \"', \" + citygeom + \")\"\n cur.execute(query)\n conn.commit()\n conn.close()", "def test_import_csv(self):\r\n \r\n self.assertEqual(Place.objects.all().count(), 0)\r\n assert test_client_login(self.client, username='admin@example.org', password='123') == True\r\n\r\n csv_file = StringIO(\"Donut Mountain,123 Fakey St.,1.0,2.0\\nDonut House,124 Fakey St.,1.001,2.001\")\r\n csv_file.name = 'test.csv'\r\n response = self.client.post(self.import_url, {'place_type': '1', 'csv_file': csv_file})\r\n self.assertEqual(response.status_code, 200)\r\n \r\n self.assertEqual(Place.objects.all().count(), 2)\r\n \r\n place = Place.objects.get(normalized_name='DONUT MOUNTAIN')\r\n self.assertEqual(place.address, '123 Fakey St.')\r\n self.assertEqual(place.location.x, 2.0)\r\n self.assertEqual(place.location.y, 1.0)\r\n \r\n place = Place.objects.get(normalized_name='DONUT HOUSE')\r\n self.assertEqual(place.address, '124 Fakey St.')\r\n self.assertEqual(place.location.x, 2.001)\r\n self.assertEqual(place.location.y, 1.001)", "def load_cities_table():\n cities = {}\n with open(str(BASE_DIR.joinpath('cidades.csv'))) as cities_file:\n for code, city, _ in (c.split(';') for c in cities_file):\n cities[code[:7]] = city\n return cities", "async def test_get_location_data(self):\n for city_name in ['dublin', 'London', 'Copenhagen']:\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ))\n self.assertEqual(response.code, HTTPStatus.OK)\n self.check_city_response(response, city_name.lower())", "def __init__(self, path = None, url = URL_MAXMIND_GEOCITY):\n self.path = path or \"GeoLiteCity.dat\"\n self.url = url\n 
self._load()", "def test_citydistricts_get(self):\n pass", "def import_streets_fromdb(self, cityName=''):\n if not check_role(self):\n return \"You must have a manager role to run this script\"\n\n out = ['<html>']\n lf = '<br />'\n if not cityName:\n #out.append(\"available properties:%s\"%portal.portal_memberdata.propertyItems())\n out.append(\"You must call the script followed by needed parameters:\")\n out.append(\"-> cityName=... name of the commune\")\n out.append(\"by example ...?cityName=La Bruyère<br/>\")\n return lf.join(out)\n\n tool = api.portal.get_tool('portal_urban')\n if tool is None:\n return \"No portal_urban found on this site: is urban installed?\"\n if not hasattr(tool, 'streets'):\n return \"The 'streets' folder does not exist in portal_urban!\"\n\n ex_streets = {}\n ret = load_existing_streets(self, ex_streets)\n out += ret\n\n config_bestaddress = ExternalConfig('bestaddress')\n bestaddress = BestaddressService(**(config_bestaddress.bestaddress))\n session = bestaddress.new_session()\n results = session.query_streets(cityName)\n session.close()\n bestaddress.engine.dispose()\n if not results:\n return \"No record found for city name '%s', maybe mispelled ?\" % cityName\n numberOfRecords = len(results)\n out.append(\"%d streets found in the database\" % numberOfRecords)\n i = 1\n for street in results:\n print \"Importing street %d of %d\" % (i, numberOfRecords)\n ret = createStreet(\n city=street.short_entity,\n zipcode=street.zip,\n streetcode=street.national_code,\n streetname=street.street,\n bestAddresskey=street.key,\n startdate=street.begin_date,\n enddate=street.end_date,\n regionalroad=street.regional_road,\n ex_streets=ex_streets\n )\n out += ret\n i = i + 1\n return lf.join(out) + '</html>'", "def test_import_same_name_synonym(self):\r\n\r\n self.assertEqual(Place.objects.all().count(), 0)\r\n assert test_client_login(self.client, username='admin@example.org', password='123') == True\r\n\r\n csv_file = StringIO(\"Donut Mountain,123 Fakey St.,1.0,2.0,http://www.example.org/bs/0,123 D House, Mount D\\nDonut Mountain,99 Fakley St.,99.0,22.0,http://www.example.org/bs/1,99 D House, Mount D\")\r\n csv_file.name = 'test.csv'\r\n response = self.client.post(self.import_url, {'place_type': '1', 'csv_file': csv_file})\r\n self.assertEqual(response.status_code, 200)\r\n\r\n self.assertEqual(Place.objects.all().count(), 2)\r\n\r\n locs = []\r\n for place in Place.objects.filter(normalized_name='DONUT MOUNTAIN').all():\r\n synonyms = set([x.normalized_name for x in PlaceSynonym.objects.filter(place=place).all()])\r\n ll = [place.address, place.location.x, place.location.y, place.url]\r\n ll.extend(sorted(synonyms))\r\n locs.append(tuple(ll))\r\n\r\n assert ('123 Fakey St.', 2.0, 1.0, 'http://www.example.org/bs/0', '123 D HOUSE', 'MOUNT D') in locs\r\n assert ('99 Fakley St.', 22.0, 99.0, 'http://www.example.org/bs/1', '99 D HOUSE', 'MOUNT D') in locs", "def load_cities (filename):\n if not os.path.isfile(filename):\n return None\n # try to decode a plain file\n try:\n with open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n # try to decode a gzipped file\n try:\n with gzip.open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n return None", "def load_city_cp():\n with open(os.path.join(CITY_COORDS_FILE_PATH, 'city_center_point_coords.json'), encoding=\"utf8\") as file:\n return json.load(file)", "def location_data_import(transactional_db):\n output = io.StringIO()\n 
call_command(\n 'import_locations',\n location_file='tests/fixtures/importer/locations.geojson',\n stdout=output\n )\n return output", "def test_import_car(self, mock_db):\n import_cars(\"csv-data/car-set-1.csv\")", "def test_import_data(self):\n self.assertEqual(import_data(src_dir, 'product_file.csv', 'customer_file.csv', 'rental_file.csv'), ((9999, 9999, 9999), (1, 0, 1)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log control data at each step during evaluation.
def _log_control_data(self, action, global_reward): action_r = ','.join(['%d' % a for a in action]) cur_control = {'episode': self.cur_episode, 'step': self.t, 'action': action_r, 'reward': global_reward} self.control_data.append(cur_control)
[ "def log(self):\n self.simulator.log()", "def log(self, variable_name, data, t_step):\n self.variable_names[variable_name].step(data, t_step)", "def _log(self, data):\n if self.log_data is not None:\n self.log_data(data)", "def log_evaluation_details(self, data, labels, level=logging.INFO) -> float:\n pass", "def on_eval_batch_begin(self, step, logs=None):", "def on_eval_begin(self, logs=None):", "def log(self, episode, step, observations, reward, done, info):\n for i, agent in enumerate(self.world.agents):\n agent_data = [episode, step, i] + self.logging_callback(agent, self.world) + [reward[i],\n done[i],\n info['n'][i]]\n self.logger.add(\"State\", agent_data)", "def log(self):\n pass", "def _log_train(self, step, train_info, ep_info):\n for k, v in train_info.items():\n if np.isscalar(v) or (hasattr(v, \"shape\") and np.prod(v.shape) == 1):\n wandb.log({\"train_rl/%s\" % k: v}, step=step)\n else:\n wandb.log({\"train_rl/%s\" % k: [wandb.Image(v)]}, step=step)\n\n for k, v in ep_info.items():\n wandb.log({\"train_ep/%s\" % k: np.mean(v)}, step=step)\n wandb.log({\"train_ep_max/%s\" % k: np.max(v)}, step=step)", "def log_all(self):\n self.save_raw()\n self.log()", "def log(self, value):\n for element, acc in self.accumulators:\n acc.log(element, self.varname, self.category, value)", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def record_step(self):\n # Only record a step if it is different from the last one\n if self._values_changed:\n self._values_changed = False\n\n # Use copy of values in case the printer decides to keep the dict\n # (Only affects the mock in the tests right now, but you never know...)\n values = self._current_values.copy()\n\n # Print only once every print_interval calls\n if self._counter % self.print_interval == 0:\n # Pass values to printers\n for p in self.printers:\n p.print_values(values, self._value_keys, self._first_step)\n\n # Definitely not the first step any more\n self._first_step = False\n\n # Increase call counter\n self._counter += 1", "def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", self.exploration_rate)", "def on_eval_end(self, logs=None):", "def _logging(self):\n msgs = []\n # patch to log stdout spawned processes of dataloader\n logger = init_logger()\n for ds_name, ds_count in self._counts.items():\n msgs.append(f\"\\t\\t\\t* {ds_name}: {ds_count}\")\n logger.info(\"Weighted corpora loaded so far:\\n\" + \"\\n\".join(msgs))", "def _m_step(self):\n log_mu_ij = ((self.log_mu[0,:] * np.ones((self.data.num_workers, self.data.num_instance))).transpose())\n log_one_minus_mu_ij = ((self.log_mu[1,:] * np.ones((self.data.num_workers, self.data.num_instance))).transpose())\n 
alpha_log_denomi = sp.special.logsumexp(log_mu_ij, axis=0, b=(self.data.y != 0))\n alpha_log_nume_pos = sp.special.logsumexp(log_mu_ij, axis=0, b=(self.data.y == 1))\n alpha_log_nume_neg = sp.special.logsumexp(log_mu_ij, axis=0, b=(self.data.y == -1))\n beta_log_denomi = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(self.data.y != 0))\n beta_log_nume_pos = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(self.data.y == 1))\n beta_log_nume_neg = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(self.data.y == -1))\n self.log_p = sp.special.logsumexp(self.log_mu, axis=1)\n self.log_p = self.log_p - sp.special.logsumexp(self.log_p)\n self.log_alpha = np.array([alpha_log_nume_pos - alpha_log_denomi, alpha_log_nume_neg - alpha_log_denomi])\n self.log_beta = np.array([beta_log_nume_neg - beta_log_denomi, beta_log_nume_pos - beta_log_denomi])", "def on_eval_batch_end(self, step, logs=None):", "def print_steps(self):\r\n steps = self.get_all_steps()\r\n for i, step in enumerate(steps):\r\n exp_logger.info(\"Step %s: %s\" % (i, str(step)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get actions of each agent's neighbours in the graph.
def get_neighbor_action(self, action): naction = [] for i in range(self.n_agent): naction.append(action[self.neighbor_mask[i] == 1]) return naction
[ "def k(self, i):\n es = self.graph.es.select(_source=i)\n actions = []\n for edge in es:\n action = edge[\"action\"]\n if action not in actions:\n actions.append(action)\n return actions", "def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")", "def get_actions(self):\n for i, j, h in self.get_towers():\n for action in self.get_tower_actions(i, j):\n yield action", "def _target_acts(self, obs_all_agents):\n target_actions = []\n if TARGET_NOISE: noise = self.add_noise().to(device)\n for ai in range(self.num_agents):\n agent = self.agents_list[ai]\n target_action = agent._target_act(obs_all_agents[ai])\n if TARGET_NOISE: target_action += noise\n target_actions.append(target_action)\n\n return target_actions #list of num_agents; @batchsize x action size", "def act(self, states: np.ndarray, eps: float = 0.0) -> List[np.ndarray]:\n actions = [\n agent.act(state.reshape(-1, 1).T, eps)\n for agent, state in zip(self.agents, states)\n ]\n return actions", "def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to it.\r\n return agent_action_list", "def get_actions(self):\n return self.agent.get_actions()", "def get_avail_actions(self):\r\n return [[1 for _ in range(self.n_actions)] for agent_id in range(self.n_agents)]", "def traverse(self, action_details: Dict):\n agent = action_details[\"agent_id\"]\n self.agents[agent-1].traversing = True\n # distanation node\n dest_node = action_details[\"to\"]\n\n # TODO add checks for from and to nodes\n\n node1, node2, distance = self.agents_location[agent]\n # people_collected = 0\n \n # If the agent is in node ( not on the edge ) check if the distination node is its neighbor\n if node1 == node2 and self.graph.is_neighbours(node1, dest_node) and not (node2,dest_node) in self.blocked_edges :\n # Get (node1,dest_node) edge weight\n\n edge_weight = self.graph.get_weight(node1, dest_node)\n\n # Move the agent into the edge (node1,dest_node)\n distance = edge_weight - 1\n self.agents_location[agent] = [node1, dest_node, distance]\n action_succeed = True\n\n # If the agent is already inside the edge , check whether destination node is correct\n elif node1 != node2 and node2 == dest_node:\n\n # Move the agent one step on the edge\n distance -= 1\n self.agents_location[agent][2] = distance\n\n action_succeed = True\n else:\n # If the destination node is wrong\n action_succeed = False\n # TODO write warning\n\n # If the agent arrived to some node , collect all the people there and change the location from [node1,node2,X]\n # to [dest_node,dest_node,0]\n if distance == 0 and action_succeed:\n self.agents_location[agent] = [dest_node, dest_node, 0]\n self.agents[agent-1].traversing = False\n 
self.agents[agent-1].location = dest_node\n action_succeed = True\n\n self.agents_last_action[agent] = action_succeed\n\n new_observation = self.get_observation({})\n\n return new_observation", "def act(self, all_states, add_noise=True): \n actions=[]\n for id,agent in enumerate(self.ddpg_agents):\n # call to act of each ddpg_agent\n action = agent.act((all_states[id]),add_noise)\n actions.append(action)\n return actions", "def _obtain_OtherAgentsActionsSummationTensor(self):\n dim = np.concatenate(([self.N], # agent i\n [self.N for _ in range(self.N-1)], # other agnt\n [self.M], # agent a of agent i\n [self.M for _ in range(self.N)], # all acts\n [self.M for _ in range(self.N-1)])) # other a's\n Omega = np.zeros(dim.astype(int), int)\n\n for index, _ in np.ndenumerate(Omega):\n I = index[0]\n notI = index[1:self.N]\n A = index[self.N]\n allA = index[self.N+1:2*self.N+1]\n notA = index[2*self.N+1:]\n\n if len(np.unique(np.concatenate(([I], notI)))) is self.N:\n # all agents indicides are different\n\n if A == allA[I]:\n # action of agent i equals some other action\n cd = allA[:I] + allA[I+1:] # other actionss\n areequal = [cd[k] == notA[k] for k in range(self.N-1)]\n if np.all(areequal):\n Omega[index] = 1\n\n return Omega", "def getPossibleActions(self):\n \n return actions.all_agent_actions()", "def get_neighbours(self, state):\n state_list = []\n for action in self.actions:\n state_list.append(self.transition(state, action))\n return list(set(state_list))", "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions", "def get_possible_actions(self) -> [Action]:\r\n if self.fields[self.agent_x][self.agent_y] == Field.EMPTY or self.terminated:\r\n return [Action.NORTH, Action.EAST, Action.SOUTH, Action.WEST]\r\n else: # must be terminal\r\n return [Action.TERMINAL]", "def solution(self):\n return [node.action for node in self.path()[1:]]", "def actions(self, state):\n\n\t\tpossibleActions = []\n\n\t\tflashlightLocation = state[0]\n\n\t\t\"\"\"\n\t\t\tIf a person is on the side of the flashlight, then they can cross the bridge by themselves or \n\t\t\tthey can cross with another person who is also on their side (the side of the flashlight).\n\t\t\t-\tSo we add an action for this person crossing by themselves, and also actions for them crossing\n\t\t\t\twith other people (each of these actions is them crossing with one of these other \n\t\t\t\tpeople, making 2 of them crossing the bridge)\n\t\t\t\t\n\t\t\tNote that person i and person j crossing the bridge is the same action as person j and person i crossing, \n\t\t\tand we only want to add this action once so when determining the people that person i can cross with \n\t\t\twe look at people who come after this person i (a person j where j > i) \n\t\t\"\"\"\n\n\t\tfor personI in range(1, self.n+1): # exclude the flashlight - only traverse the peoples' locations\n\t\t\tif state[personI] == flashlightLocation: #This person can cross the bridge\n\t\t\t\taction = [personI] # This person (person i) can cross bridge on their own (with the flashlight)\n\t\t\t\tpossibleActions.append(action)\n\t\t\t\tfor personJ in range(personI+1, self.n+1):\n\t\t\t\t\tif state[personJ] == flashlightLocation: # This person (person j) can cross the bridge\n\t\t\t\t\t\taction = [personI, personJ] # person i can cross the bridge with person j (and the flashlight)\n\t\t\t\t\t\tpossibleActions.append(action)\n\n\t\treturn possibleActions", "def gen_action(self, agent_list, 
observation, free_map=None):\n action_list = []\n\n for i in len(agent_list):\n #TODO print observationt omake sure it is the right thing\n state = observation\n\n if np.random.rand(1) < epsilon:\n q_values = self.online_model(state)\n action = env.action_space.sample()\n\n else:\n q_values = self.online_model(state)\n _, action = torch.max(q_values, 1)\n action = action.item()\n\n return action, q_values\n\n action =\n return action_list", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! resources: object of the Resources class, containing resources from the config file; options: object of the MergeOptions class, containing merge options from the config file; str_name: defaults to the class name "SynsetsSUMOMerger2"
def __init__(self, resources, options, str_name = 'SynsetsSUMOMerger2'): super(SynsetsSUMOMerger2, self).__init__(resources, options, str_name)
[ "def __init__(self, resources, options, str_name = 'SynsetsMSRMerger'):\n super(SynsetsMSRMerger, self).__init__(resources, options, str_name)", "def test_merge_file_multiple(self):\n cfg = gc3libs.config.Configuration()\n cfg.merge_file(self.f1)\n cfg.merge_file(self.f2)\n cfg.merge_file(self.f3)\n assert cfg.resources['localhost']['seq'] == '3'\n assert cfg.resources['localhost']['foo'] == '2'", "def register_resources(self, resources):\n from tw.api import merge_resources\n merge_resources(self.request_local.resources, resources)", "def MergeLogic(self) -> str:", "def merge_resources(workDir, decompileDir, channel):\n\n baseResPath = os.path.join(decompileDir, 'res')\n channelResPath = os.path.join(workDir, 'sdk/' + channel['sdk'] + '/res')\n\n pluginPaths = list()\n pluginFolders = os.path.join(workDir, 'plugins')\n\n if os.path.exists(pluginFolders):\n for f in os.listdir(pluginFolders):\n pluginPaths.append(os.path.join(pluginFolders, f + \"/plugin/res\"))\n\n ResourceMerger.merge(baseResPath, channelResPath, pluginPaths)", "def mergeConfig(self):\n config = \\\n \"from Configuration.DataProcessing.Merge import mergeProcess\\nprocess = mergeProcess(\\n \"\n config += \",\".join(self.merge_inputs)\n config += \",\\n\"\n config += \" output_file = \\\"%s\\\",\\n\" % os.path.basename(self.lfn)\n config += \" output_lfn = \\\"%s\\\"\\n) \" % self.lfn\n return config", "def merge(): #Status: WIP\r\n pass", "def import_terminology(self, Name: str, MergeStrategy: str, TerminologyData: Dict, Description: str = None, EncryptionKey: Dict = None) -> Dict:\n pass", "def createMergedConfigFile(self):\n # Read config data\n if os.path.isfile(self.config_file):\n with open(self.config_file, 'r') as stream:\n try:\n cfg = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n if debug:\n print(\"Using Config file: \" + self.config_file)\n else:\n if debug:\n print(\"Config file does not exist: \" + self.config_file)\n exit(1)\n\n # If project namespace was not in the config file, set a default\n if (cfg is not None\n and 'generic' in cfg\n and 'project_namespace' in cfg['generic']\n and cfg['generic']['project_namespace'] is not None\n and len(cfg['generic']['project_namespace']) > 0):\n if debug:\n print(\"Using specified namespace\")\n else:\n conf_dir = os.path.dirname(self.config_file)\n cmd = \"cd \" + conf_dir + ' && basename `git rev-parse --show-toplevel`'\n try:\n result_bytes = subprocess.check_output(cmd,\n timeout=300,\n shell=True)\n project_namespace = result_bytes.decode('UTF-8').rstrip()\n if debug:\n print(\"Derived namespace from git: \" + project_namespace)\n except subprocess.CalledProcessError as e:\n if debug:\n print(\"Error deriving project namespace from git: \", e.output)\n sys.exit(1)\n # Insert the project_namespace into the config data\n if cfg is None:\n cfg = {}\n if 'generic' not in cfg:\n cfg['generic'] = {}\n cfg['generic']['project_namespace'] = project_namespace\n\n # Confirm project namespace\n if debug:\n print(\"Project Namespace: \" + cfg['generic']['project_namespace'])\n\n # Read overrides\n override_file_data = {}\n if os.path.isfile(self.override_file):\n with open(self.override_file, 'r') as stream:\n try:\n override_file_data = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n # Created merged data\n self.config_data = cfg\n # print(\"Applying override_file_data: \" + str(override_file_data))\n if override_file_data is not None:\n self.config_data = merge(self.config_data, override_file_data)\n\n # Ensure parent 
directory for merged file exists\n directory = Path(self.merged_file).parent\n if not os.path.exists(directory):\n os.makedirs(directory)\n # Created merged file\n with open(self.merged_file, 'w') as out_file:\n yaml.dump(self.config_data, out_file)", "def merge(self, skel):\n return Skeleton.simple_merge((self, skel)).consolidate()", "def merge_spec(self):\n from django_swagger_utils.spec_client.merge_spec import MergeSpec\n merge_spec = MergeSpec(self.paths['api_spec_dir'], self.paths['base_dir'])\n merge_spec.merge()", "def append_resource_info(main_curr, chunk_db):\n cmd = \"attach ? as toMerge\"\n main_curr.execute(cmd, (chunk_db, ))\n\n cmd = \"INSERT INTO resources SELECT * FROM toMerge.resources\"\n main_curr.execute(cmd)\n\n cmd = \"detach toMerge\"\n main_curr.execute(cmd)", "def defineResources():\n # Read the resources.cfg file and add all resource locations in it\n cf = ogre.ConfigFile()\n cf.load(\"resources.cfg\")\n seci = cf.getSectionIterator()\n while seci.hasMoreElements():\n secName = seci.peekNextKey()\n settings = seci.getNext()\n\n for item in settings:\n typeName = item.key\n archName = item.value\n ogre.ResourceGroupManager.getSingleton().addResourceLocation(\n archName, typeName, secName)", "def resources(self, resources):\n self._resources = resources", "def merge_configs(parameters_config: str, dataset_config: str, overrides: Dict) -> Params:\n mergedSettings = Params.from_file(parameters_config).as_dict()\n mergedSettings = with_fallback(overrides, mergedSettings)#.update(overrides)\n #mergedSettings = Params(mergedSettings)\n dataset_config = Params.from_file(dataset_config)\n defaultDecoder = mergedSettings['model'].pop('default_decoder')\n orderedStuff = {}\n mergedSettings['dataset_reader']['datasets'] = {}\n mergedSettings['model']['decoders'] = {}\n\n for dataset in dataset_config:\n dataReader = {} \n dataReader['train'] = dataset_config[dataset]['train_data_path']\n dataReader['dev'] = dataset_config[dataset]['validation_data_path']\n if 'test_data_path' in dataset_config[dataset]:\n dataReader['test'] = dataset_config[dataset]['test_data_path']\n\n if 'word_idx' in dataset_config[dataset]:\n dataReader['word_idx'] = dataset_config[dataset]['word_idx']\n else:\n dataReader['sent_idxs'] = dataset_config[dataset]['sent_idxs']\n \n dataReader['tasks'] = {}\n if 'copy_other_columns' in dataset_config[dataset]:\n dataReader['copy_other_columns'] = dataset_config[dataset]['copy_other_columns']\n else:\n dataReader['copy_other_columns'] = mergedSettings['model']['default_dataset']['copy_other_columns']\n\n for task in dataset_config[dataset]['tasks']:\n taskOverride = dataset_config[dataset]['tasks'][task]\n decoder = copy.deepcopy(defaultDecoder)\n decoder.update(taskOverride)\n\n decoder['dataset'] = dataset\n decoder['task'] = task\n\n dataReader['tasks'][task] = copy.deepcopy(decoder)\n orderIdx = decoder['order']\n if 'task_type' not in decoder:\n logger.warning('Error, task ' + task + ' has no defined task_type')\n exit(1)\n curTrans = decoder['task_type']\n curLayer = decoder['layer']\n \n\n if decoder['task_type'] == 'dependency':\n decoder['type'] = 'machamp_dependency_decoder'\n if 'metric' not in dataReader['tasks'][task]:\n decoder['metric'] = 'LAS'\n if 'tag_representation_dim' not in dataReader['tasks'][task]:\n decoder['tag_representation_dim'] = 256\n if 'arc_representation_dim' not in dataReader['tasks'][task]:\n decoder['arc_representation_dim'] = 768\n\n elif decoder['task_type'] == 'classification':\n decoder['type'] = 
'machamp_sentence_classifier'\n #ROB TODO why do we need empty kwargs?\n decoder['kwargs'] = {}\n\n elif decoder['task_type'] == 'multiseq':\n decoder['type'] = 'multiseq_decoder'\n\n elif decoder['task_type'] in ['seq', 'string2string']:\n if 'decoder_type' in decoder and decoder['decoder_type'] == 'crf':\n decoder['type'] = 'masked_crf_decoder'\n del decoder['decoder_type']\n del decoder['decoder_type']\n else:\n decoder['type'] = 'machamp_tag_decoder'\n \n else: \n logger.warning('task_type ' + str(dataReader['tasks'][task]['task_type']) + \" not known\")\n exit(1)\n\n if 'metric' not in decoder:\n decoder['metric'] = 'acc'\n if decoder['metric'] == 'span_f1':\n decoder['metric'] = 'machamp_span_f1'\n orderedStuff[task] = [orderIdx, curTrans, curLayer]\n\n # save stuff in mergedSettings\n mergedSettings['model']['decoders'][task] = decoder\n dataReader['tasks'][task] = copy.deepcopy(decoder)\n mergedSettings['dataset_reader']['datasets'][dataset] = dataReader\n # Rob: we definitely do not want to cheat and add dev and test labels here\n mergedSettings[\"datasets_for_vocab_creation\"] = [\"train\"]\n \n del mergedSettings['model']['default_dataset']\n\n # to support reading from multiple files we add them to the datasetreader constructor instead\n # the following ones are there just here to make allennlp happy\n mergedSettings['train_data_path'] = 'train'\n mergedSettings['validation_data_path'] = 'dev'\n if 'test_data_path' in dataset_config[dataset]:\n mergedSettings['test_data_path'] = 'test'\n \n # generate ordered lists, which make it easier to use in the machamp model\n orderedTasks = []\n orderedTaskTypes = []\n orderedLayers = []\n for label, idx in sorted(orderedStuff.items(), key=lambda item: item[1]):\n orderedTasks.append(label)\n orderedTaskTypes.append(orderedStuff[label][1])\n orderedLayers.append(orderedStuff[label][2])\n mergedSettings['model']['tasks'] = orderedTasks\n mergedSettings['model']['task_types'] = orderedTaskTypes\n mergedSettings['model']['layers_for_tasks'] = orderedLayers\n \n mergedSettings['model']['decoders'][orderedTasks[0]]['prev_task'] = None\n for taskIdx, task in enumerate(orderedTasks[1:]):\n mergedSettings['model']['decoders'][task]['prev_task'] = orderedTasks[taskIdx] \n #TODO shouldnt this be -1?\n for task in orderedTasks:\n mergedSettings['model']['decoders'][task]['task_types'] = orderedTaskTypes \n mergedSettings['model']['decoders'][task]['tasks'] = orderedTasks \n #taskIdx is not +1, because first item is skipped\n\n # remove items from tagdecoder, as they are not neccesary there\n for item in ['task_type', 'dataset', 'column_idx', 'layer', 'order']:\n for task in mergedSettings['model']['decoders']:\n if item in mergedSettings['model']['decoders'][task]:\n del mergedSettings['model']['decoders'][task][item]\n\n \n if 'trainer' in overrides and 'cuda_device' in overrides['trainer']:\n mergedSettings['trainer']['cuda_device'] = overrides['trainer']['cuda_device']\n #import pprint\n #pprint.pprint(mergedSettings.as_dict())\n #exit(1)\n numSents = 0\n for dataset in mergedSettings['dataset_reader']['datasets']:\n trainPath = mergedSettings['dataset_reader']['datasets'][dataset]['train']\n numSents += countLines(trainPath)\n warmup = int(numSents/mergedSettings['iterator']['batch_size'])\n mergedSettings['trainer']['learning_rate_scheduler']['warmup_steps'] = warmup\n mergedSettings['trainer']['learning_rate_scheduler']['start_step'] = warmup\n mergedSettings['model']['bert_path'] = 
mergedSettings['dataset_reader']['token_indexers']['bert']['pretrained_model']\n\n #TODO, this will result in the same as appending _tags , however, the \n # warning will still be there... this can be circumvented by copying \n # allennlp.data.fields.sequence_label_field and add a smarter check...\n #mergedSettings['vocabulary'] = {'non_padded_namespaces': ['ne1']}\n return Params(mergedSettings)", "def _resolve_duplicates(self) -> None:\n resource_ids_resources: DefaultDict[str, List[Resource]] = defaultdict(list)\n for resource in self.resources:\n resource_ids_resources[resource.resource_id].append(resource)\n merged_resources: List[Resource] = []\n for resource_id, resources in resource_ids_resources.items():\n if len(resources) > 1:\n merged_resource = ResourceSpec.merge_resources(\n resource_id=resource_id, resources=resources\n )\n merged_resources.append(merged_resource)\n for merged_resource in merged_resources:\n self.resources = [\n resource\n for resource in self.resources\n if resource.resource_id != merged_resource.resource_id\n ]\n self.resources.append(merged_resource)", "def _load_resources(self):\n puts = (getattr(self, 'project', None) or self).puts\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n for name in self.settings.get(resource_type, {}):\n extra = {\n 'project': getattr(self, 'project', None) or self,\n 'app': self if hasattr(self, 'project') else None,\n }\n\n with indent(4 if hasattr(self, 'project') else 2):\n puts(colored.green(u\"✓ {}:{}\".format(resource_type, name)))\n\n self._resources[resource_type].append(\n resource_cls.factory(\n name=name,\n settings=self.settings.get(resource_type, {})[name],\n **extra\n )\n )", "def update_dataset_resources(dataset_entity): \n global args, ckan_client\n \n # Initialize an empty array of resources\n resources = []\n \n # If the dataset has existing resources, update them\n if ('resources' in dataset_entity):\n resources = dataset_entity['resources']\n \n # Construct the file resource download urls\n dataset_file_name = get_dataset_filename() \n \n # Get the dataset title (short name)\n title = args.dataset_title\n \n # Export to the various file formats\n if 'shp' in args.formats:\n \n shp_resource = get_resource_by_format(resources, 'shp')\n \n if (shp_resource is None):\n logger.info('Creating new SHP resource')\n shp_resource = {}\n resources.append(shp_resource)\n else: \n logger.info('Updating SHP resource')\n \n shp_resource['name'] = title + ' - SHP'\n shp_resource['description'] = title + ' - Shapefile'\n shp_resource['url'] = args.download_url + dataset_file_name + '/shape/' + dataset_file_name + '.zip'\n shp_resource['mimetype'] = 'application/zip'\n shp_resource['format'] = 'shp'\n shp_resource['resource_type'] = 'file'\n\n # Get the size of the file\n file_size = get_file_size(output_folder + '\\\\shape\\\\' + dataset_file_name + '.zip')\n if file_size:\n shp_resource['size'] = file_size \n\n if 'dwg' in args.formats:\n\n dwg_resource = get_resource_by_format(resources, 'dwg')\n \n if (dwg_resource is None):\n logger.info('Creating new DWG resource')\n dwg_resource = {}\n resources.append(dwg_resource) \n else: \n logger.info('Updating DWG resource')\n \n dwg_resource['name'] = title + ' - DWG'\n dwg_resource['description'] = title + ' - AutoCAD DWG'\n dwg_resource['url'] = args.download_url + dataset_file_name + '/cad/' + dataset_file_name + '.dwg'\n dwg_resource['mimetype'] = 'application/acad'\n dwg_resource['format'] = 'dwg'\n dwg_resource['resource_type'] = 'file'\n\n # 
Get the size of the file\n file_size = get_file_size(output_folder + '\\\\cad\\\\' + dataset_file_name + '.dwg')\n if file_size:\n dwg_resource['size'] = file_size \n\n if 'kml' in args.formats:\n \n kml_resource = get_resource_by_format(resources, 'kml')\n \n if (kml_resource is None):\n logger.info('Creating new KML resource') \n kml_resource = {}\n resources.append(kml_resource)\n else: \n logger.info('Updating KML resource')\n\n kml_resource['name'] = title + ' - KML'\n kml_resource['description'] = title + ' - Google KML'\n kml_resource['url'] = args.download_url + dataset_file_name + '/kml/' + dataset_file_name + '.kmz'\n kml_resource['mimetype'] = 'application/vnd.google-earth.kmz'\n kml_resource['format'] = 'kml'\n kml_resource['resource_type'] = 'file'\n \n # Get the size of the file\n file_size = get_file_size(output_folder + '\\\\kml\\\\' + dataset_file_name + '.kmz')\n if file_size:\n kml_resource['size'] = file_size \n\n if 'csv' in args.formats:\n \n csv_resource = get_resource_by_format(resources, 'csv')\n \n if (csv_resource is None):\n logger.info('Creating new CSV resource')\n csv_resource = {}\n resources.append(csv_resource)\n else: \n logger.info('Updating CSV resource')\n\n csv_resource['name'] = title + ' - CSV'\n csv_resource['description'] = title + ' - Comma-Separated Values'\n csv_resource['url'] = args.download_url + dataset_file_name + '/csv/' + dataset_file_name + '.csv'\n csv_resource['mimetype'] = 'text/csv'\n csv_resource['format'] = 'csv'\n csv_resource['resource_type'] = 'file'\n \n # Get the size of the file\n file_size = get_file_size(output_folder + '\\\\csv\\\\' + dataset_file_name + '.csv')\n if file_size:\n csv_resource['size'] = file_size \n\n if 'metadata' in args.formats:\n \n metadata_resource = get_resource_by_format(resources, 'XML')\n \n if (metadata_resource is None):\n logger.info('Creating new Metadata resource') \n metadata_resource = {}\n resources.append(metadata_resource)\n else: \n logger.info('Updating Metadata resource')\n\n metadata_resource['name'] = title + ' - Metadata'\n metadata_resource['description'] = title + ' - Metadata'\n metadata_resource['url'] = args.download_url + dataset_file_name + '/metadata/' + dataset_file_name + '.xml'\n metadata_resource['mimetype'] = 'application/xml'\n metadata_resource['format'] = 'xml'\n metadata_resource['resource_type'] = 'metadata'\n \n # Get the size of the file\n file_size = get_file_size(output_folder + '\\\\metadata\\\\' + dataset_file_name + '.xml')\n if file_size:\n metadata_resource['size'] = file_size \n \n if 'gdb' in args.formats:\n \n gdb_resource = get_resource_by_format(resources, 'gdb')\n \n if (gdb_resource is None):\n logger.info('Creating new gdb resource')\n gdb_resource = {}\n resources.append(gdb_resource)\n else: \n logger.info('Updating GDB resource')\n \n gdb_resource['name'] = title + ' - GDB'\n gdb_resource['description'] = title + ' - Esri File Geodatabase'\n gdb_resource['url'] = args.download_url + dataset_file_name + '/gdb/' + dataset_file_name + '.zip'\n gdb_resource['mimetype'] = 'application/zip'\n gdb_resource['format'] = 'gdb'\n gdb_resource['resource_type'] = 'file'\n \n # Get the size of the file\n file_size = get_file_size(output_folder + '\\\\gdb\\\\' + dataset_file_name + '.zip')\n if file_size:\n gdb_resource['size'] = file_size \n \n # Update the resources on the dataset \n dataset_entity['resources'] = resources;\n \n return dataset_entity", "def _merge_autoload_details(autoload_details1, autoload_details2):\r\n for attribute in 
autoload_details2.attributes:\r\n autoload_details1.attributes.append(attribute)\r\n for resource in autoload_details2.resources:\r\n autoload_details1.resources.append(resource)\r\n return autoload_details1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Create a dictionary based on the file mapping PLWN onto the SUMO ontology. The dictionary format and the format of the PLWN-to-SUMO mapping file are presented below.
def get_plwn2sumo_dict(self): if not os.path.exists(self.resources().mapping_sumo_file()): raise IOError( "%s file not found!" % \ self.resources().mapping_sumo_file() ) plwn2sumo_dict = defaultdict(set) with open(self.resources().mapping_sumo_file()) as sumofile: next(sumofile) for line in sumofile: synset_id = int(line.strip().split(';')[0]) sumo = line.strip().split(';')[-2] plwn2sumo_dict[sumo].add(synset_id) return plwn2sumo_dict
[ "def populateMappingDictionary(self):\n with open(\"mapping.txt\", \"r\") as file_object:\n for line in file_object:\n splitline = line.split()\n self.mapping[' '.join(splitline[1:])] = splitline[0]", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANPR').get('abstractTypes')\n exolinks = globalMap.get('ANPR').get('exolinks')\n\n # DataType ScaleFunction\n currentMap = {}\n abstractTypes['ScaleFunction'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00008'] = currentMap\n loadMaps['ANPR.ScaleFunction'] = currentMap\n currentMap['tag'] = 'ANPR.ScaleFunction'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00008'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnnealProtocol\n currentMap = {}\n abstractTypes['AnnealProtocol'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00004'] = currentMap\n loadMaps['ANPR.AnnealProtocol'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'annealProtocols'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealProtocol\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnnealProtocol.application\n currentMap = {}\n contentMap['application'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00003'] = currentMap\n loadMaps['ANPR.AnnealProtocol.application'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.application'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00003'\n currentMap['name'] = 'application'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealProtocol.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnnealProtocol.applicationVersion\n currentMap = {}\n contentMap['applicationVersion'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-18-10:52:09_00002'] = currentMap\n loadMaps['ANPR.AnnealProtocol.applicationVersion'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.applicationVersion'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-18-10:52:09_00002'\n currentMap['name'] = 'applicationVersion'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealProtocol.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00004'] = currentMap\n loadMaps['ANPR.AnnealProtocol.code'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00004'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealProtocol.details\n currentMap = {}\n contentMap['details'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00003'] = currentMap\n loadMaps['ANPR.AnnealProtocol.details'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00003'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute AnnealProtocol.methodStoreName\n currentMap = {}\n contentMap['methodStoreName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-18-10:52:09_00001'] = currentMap\n loadMaps['ANPR.AnnealProtocol.methodStoreName'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.methodStoreName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-18-10:52:09_00001'\n currentMap['name'] = 'methodStoreName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealProtocol.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00002'] = currentMap\n loadMaps['ANPR.AnnealProtocol.name'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00002'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AnnealProtocol.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnnealProtocol.annealStages\n currentMap = {}\n contentMap['annealStages'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00002'] = currentMap\n loadMaps['ANPR.AnnealProtocol.annealStages'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.annealStages'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00002'\n currentMap['name'] = 'annealStages'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n\n # Role AnnealProtocol.energyTerms\n currentMap = {}\n contentMap['energyTerms'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:08_00003'] = currentMap\n loadMaps['ANPR.AnnealProtocol.energyTerms'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.energyTerms'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:08_00003'\n currentMap['name'] = 'energyTerms'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n\n # Role AnnealProtocol.software\n currentMap = {}\n contentMap['software'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-18-10:52:14_00001'] = currentMap\n loadMaps['ANPR.AnnealProtocol.software'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.software'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-18-10:52:14_00001'\n currentMap['name'] = 'software'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = 
globalMap.get('METH').get('exolinks')\n # End of AnnealProtocol\n\n currentMap = abstractTypes.get('AnnealProtocol')\n aList = ['application', 'applicationVersion', 'code', 'details', 'methodStoreName', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['energyTerms', 'annealStages', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['annealStages', 'energyTerms']\n currentMap['children'] = aList\n\n # Class AnnealProtocolStore\n currentMap = {}\n abstractTypes['AnnealProtocolStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00003'] = currentMap\n loadMaps['ANPR.AnnealProtocolStore'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocolStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'annealProtocolStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealProtocolStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnnealProtocolStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnnealProtocolStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnnealProtocolStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnnealProtocolStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnnealProtocolStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnnealProtocolStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00009'] = currentMap\n loadMaps['ANPR.AnnealProtocolStore.name'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocolStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00009'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AnnealProtocolStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnnealProtocolStore.annealProtocols\n currentMap = {}\n contentMap['annealProtocols'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00004'] = currentMap\n loadMaps['ANPR.AnnealProtocolStore.annealProtocols'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocolStore.annealProtocols'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00004'\n currentMap['name'] = 'annealProtocols'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n\n # Role AnnealProtocolStore.refPotentialTerms\n currentMap = {}\n contentMap['refPotentialTerms'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00008'] = currentMap\n loadMaps['ANPR.AnnealProtocolStore.refPotentialTerms'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocolStore.refPotentialTerms'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00008'\n currentMap['name'] = 'refPotentialTerms'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n # End of AnnealProtocolStore\n\n currentMap = abstractTypes.get('AnnealProtocolStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['refPotentialTerms', 'annealProtocols', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['annealProtocols', 'refPotentialTerms']\n currentMap['children'] = aList\n\n # Class AnnealStage\n currentMap = {}\n abstractTypes['AnnealStage'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00005'] = currentMap\n loadMaps['ANPR.AnnealStage'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'annealStages'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealStage\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnnealStage.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnnealStage.finalTemp\n currentMap = {}\n contentMap['finalTemp'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00014'] = currentMap\n loadMaps['ANPR.AnnealStage.finalTemp'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.finalTemp'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00014'\n currentMap['name'] = 'finalTemp'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Attribute AnnealStage.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00005'] = currentMap\n loadMaps['ANPR.AnnealStage.function'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00005'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'linear'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00008')\n\n # Attribute AnnealStage.functionParams\n currentMap = {}\n contentMap['functionParams'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00006'] = currentMap\n loadMaps['ANPR.AnnealStage.functionParams'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.functionParams'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00006'\n currentMap['name'] = 'functionParams'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute AnnealStage.initialTemp\n currentMap = {}\n contentMap['initialTemp'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00013'] = currentMap\n loadMaps['ANPR.AnnealStage.initialTemp'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.initialTemp'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00013'\n currentMap['name'] = 'initialTemp'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Attribute AnnealStage.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00011'] = currentMap\n loadMaps['ANPR.AnnealStage.name'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00011'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealStage.numSteps\n currentMap = {}\n contentMap['numSteps'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00012'] = currentMap\n loadMaps['ANPR.AnnealStage.numSteps'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.numSteps'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00012'\n currentMap['name'] = 'numSteps'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001')\n\n # Attribute AnnealStage.numSubSteps\n currentMap = {}\n contentMap['numSubSteps'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00004'] = currentMap\n loadMaps['ANPR.AnnealStage.numSubSteps'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.numSubSteps'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00004'\n currentMap['name'] = 'numSubSteps'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001')\n\n # Attribute AnnealStage.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00010'] = currentMap\n loadMaps['ANPR.AnnealStage.serial'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00010'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute AnnealStage.timeStep\n currentMap = {}\n contentMap['timeStep'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-14:33:51_00001'] = currentMap\n loadMaps['ANPR.AnnealStage.timeStep'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.timeStep'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-14:33:51_00001'\n currentMap['name'] = 'timeStep'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute AnnealStage.timeStepScaling\n currentMap = {}\n contentMap['timeStepScaling'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-14:47:43_00001'] = currentMap\n loadMaps['ANPR.AnnealStage.timeStepScaling'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.timeStepScaling'\n currentMap['type'] = 'attr'\n 
currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-14:47:43_00001'\n currentMap['name'] = 'timeStepScaling'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Role AnnealStage.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnnealStage.potentialScales\n currentMap = {}\n contentMap['potentialScales'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00015'] = currentMap\n loadMaps['ANPR.AnnealStage.potentialScales'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.potentialScales'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00015'\n currentMap['name'] = 'potentialScales'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n # End of AnnealStage\n\n currentMap = abstractTypes.get('AnnealStage')\n aList = ['finalTemp', 'function', 'initialTemp', 'numSteps', 'numSubSteps', 'serial', 'timeStep', 'timeStepScaling']\n currentMap['headerAttrs'] = aList\n aList = ['functionParams', 'name', 'potentialScales']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class EnergyTerm\n currentMap = {}\n abstractTypes['EnergyTerm'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00006'] = currentMap\n loadMaps['ANPR.EnergyTerm'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'energyTerms'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = molsim.api.AnnealProtocol.EnergyTerm\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute EnergyTerm.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute EnergyTerm.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00001'] = currentMap\n loadMaps['ANPR.EnergyTerm.code'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00001'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute EnergyTerm.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00011'] = currentMap\n loadMaps['ANPR.EnergyTerm.details'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00011'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute EnergyTerm.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00006'] = currentMap\n loadMaps['ANPR.EnergyTerm.name'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00006'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n 
currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute EnergyTerm.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00005'] = currentMap\n loadMaps['ANPR.EnergyTerm.serial'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00005'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role EnergyTerm.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role EnergyTerm.potentialScales\n currentMap = {}\n contentMap['potentialScales'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00002'] = currentMap\n loadMaps['ANPR.EnergyTerm.potentialScales'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.potentialScales'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00002'\n currentMap['name'] = 'potentialScales'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n # End of EnergyTerm\n\n currentMap = abstractTypes.get('EnergyTerm')\n aList = ['serial']\n currentMap['headerAttrs'] = aList\n aList = ['code', 'details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['potentialScales', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['potentialScales']\n currentMap['children'] = aList\n\n # Class PotentialScale\n currentMap = {}\n abstractTypes['PotentialScale'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00007'] = currentMap\n loadMaps['ANPR.PotentialScale'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00007'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'potentialScales'\n currentMap['class'] = molsim.api.AnnealProtocol.PotentialScale\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute PotentialScale.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute PotentialScale.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-14:31:19_00011'] = currentMap\n loadMaps['ANPR.PotentialScale.code'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-14:31:19_00011'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = 'std'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute PotentialScale.finalScale\n currentMap = {}\n contentMap['finalScale'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00018'] = currentMap\n loadMaps['ANPR.PotentialScale.finalScale'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.finalScale'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00018'\n currentMap['name'] = 'finalScale'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Attribute PotentialScale.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00019'] = currentMap\n loadMaps['ANPR.PotentialScale.function'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00019'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'linear'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00008')\n\n # Attribute PotentialScale.functionParams\n currentMap = {}\n contentMap['functionParams'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00020'] = currentMap\n loadMaps['ANPR.PotentialScale.functionParams'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.functionParams'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00020'\n currentMap['name'] = 'functionParams'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute PotentialScale.initialScale\n currentMap = {}\n contentMap['initialScale'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00017'] = currentMap\n loadMaps['ANPR.PotentialScale.initialScale'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.initialScale'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00017'\n currentMap['name'] = 'initialScale'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role PotentialScale.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role PotentialScale.annealStage\n currentMap = {}\n contentMap['annealStage'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00016'] = currentMap\n loadMaps['ANPR.PotentialScale.annealStage'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.annealStage'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00016'\n currentMap['name'] = 'annealStage'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['copyOverride'] = True\n\n # Role PotentialScale.refPotentialTerm\n currentMap = {}\n contentMap['refPotentialTerm'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00010'] = currentMap\n loadMaps['ANPR.PotentialScale.refPotentialTerm'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.refPotentialTerm'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00010'\n currentMap['name'] = 'refPotentialTerm'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['copyOverride'] = True\n # End of PotentialScale\n\n currentMap = abstractTypes.get('PotentialScale')\n aList = ['finalScale', 'function', 'initialScale']\n currentMap['headerAttrs'] = aList\n aList = ['code', 'functionParams']\n currentMap['simpleAttrs'] = aList\n aList = ['annealStage', 'refPotentialTerm']\n currentMap['optLinks'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefPotentialTerm\n currentMap = {}\n abstractTypes['RefPotentialTerm'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00009'] = 
currentMap\n loadMaps['ANPR.RefPotentialTerm'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00009'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refPotentialTerms'\n currentMap['class'] = molsim.api.AnnealProtocol.RefPotentialTerm\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefPotentialTerm.application\n currentMap = {}\n contentMap['application'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00004'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.application'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.application'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00004'\n currentMap['name'] = 'application'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = 'general'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefPotentialTerm.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefPotentialTerm.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00005'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.code'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00005'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefPotentialTerm.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00002'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.details'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00002'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute RefPotentialTerm.index\n currentMap = {}\n contentMap['index'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00006'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.index'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.index'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00006'\n currentMap['name'] = 'index'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001')\n\n # Attribute RefPotentialTerm.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00001'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.name'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role RefPotentialTerm.access\n contentMap['access'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefPotentialTerm.potentialScales\n currentMap = {}\n contentMap['potentialScales'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00009'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.potentialScales'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.potentialScales'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00009'\n currentMap['name'] = 'potentialScales'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role RefPotentialTerm.refTermParameters\n currentMap = {}\n contentMap['refTermParameters'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00003'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.refTermParameters'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.refTermParameters'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00003'\n currentMap['name'] = 'refTermParameters'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n # End of RefPotentialTerm\n\n currentMap = abstractTypes.get('RefPotentialTerm')\n aList = ['index']\n currentMap['headerAttrs'] = aList\n aList = ['application', 'code', 'details', 'name', 'potentialScales']\n currentMap['simpleAttrs'] = aList\n aList = ['refTermParameters', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['refTermParameters']\n currentMap['children'] = aList\n\n # Class RefTermParameter\n currentMap = {}\n abstractTypes['RefTermParameter'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:32:58_00001'] = currentMap\n loadMaps['ANPR.RefTermParameter'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:32:58_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refTermParameters'\n currentMap['objkey'] = 'code'\n currentMap['class'] = molsim.api.AnnealProtocol.RefTermParameter\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefTermParameter.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefTermParameter.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00007'] = currentMap\n loadMaps['ANPR.RefTermParameter.code'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00007'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefTermParameter.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00010'] = currentMap\n loadMaps['ANPR.RefTermParameter.details'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00010'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # 
Attribute RefTermParameter.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00009'] = currentMap\n loadMaps['ANPR.RefTermParameter.name'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00009'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefTermParameter.value\n currentMap = {}\n contentMap['value'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00008'] = currentMap\n loadMaps['ANPR.RefTermParameter.value'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter.value'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00008'\n currentMap['name'] = 'value'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Role RefTermParameter.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of RefTermParameter\n\n currentMap = abstractTypes.get('RefTermParameter')\n aList = ['value']\n currentMap['headerAttrs'] = aList\n aList = ['code', 'details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnnealProtocol\n currentMap = {}\n exolinks['AnnealProtocol'] = currentMap\n loadMaps['ANPR.exo-AnnealProtocol'] = currentMap\n currentMap['tag'] = 'ANPR.exo-AnnealProtocol'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00004'\n currentMap['name'] = 'AnnealProtocol'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealProtocol\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to AnnealProtocolStore\n currentMap = {}\n exolinks['AnnealProtocolStore'] = currentMap\n loadMaps['ANPR.exo-AnnealProtocolStore'] = currentMap\n currentMap['tag'] = 'ANPR.exo-AnnealProtocolStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00003'\n currentMap['name'] = 'AnnealProtocolStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealProtocolStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to AnnealStage\n currentMap = {}\n exolinks['AnnealStage'] = currentMap\n loadMaps['ANPR.exo-AnnealStage'] = currentMap\n currentMap['tag'] = 'ANPR.exo-AnnealStage'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00005'\n currentMap['name'] = 'AnnealStage'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealStage\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n 
aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to EnergyTerm\n currentMap = {}\n exolinks['EnergyTerm'] = currentMap\n loadMaps['ANPR.exo-EnergyTerm'] = currentMap\n currentMap['tag'] = 'ANPR.exo-EnergyTerm'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00006'\n currentMap['name'] = 'EnergyTerm'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.EnergyTerm\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to PotentialScale\n currentMap = {}\n exolinks['PotentialScale'] = currentMap\n loadMaps['ANPR.exo-PotentialScale'] = currentMap\n currentMap['tag'] = 'ANPR.exo-PotentialScale'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00007'\n currentMap['name'] = 'PotentialScale'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.PotentialScale\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(globalMap.get('ANPR').get('exolinks'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to RefPotentialTerm\n currentMap = {}\n exolinks['RefPotentialTerm'] = currentMap\n loadMaps['ANPR.exo-RefPotentialTerm'] = currentMap\n currentMap['tag'] = 'ANPR.exo-RefPotentialTerm'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00009'\n currentMap['name'] = 'RefPotentialTerm'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.RefPotentialTerm\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001'))\n\n # Out-of-package link to RefTermParameter\n currentMap = {}\n exolinks['RefTermParameter'] = currentMap\n loadMaps['ANPR.exo-RefTermParameter'] = currentMap\n currentMap['tag'] = 'ANPR.exo-RefTermParameter'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:32:58_00001'\n currentMap['name'] = 'RefTermParameter'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.RefTermParameter\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001'))\n 
aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('MOLE').get('abstractTypes')\n exolinks = globalMap.get('MOLE').get('exolinks')\n\n # DataType LinkDihedralAngle\n currentMap = {}\n abstractTypes['LinkDihedralAngle'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00050'] = currentMap\n loadMaps['MOLE.LinkDihedralAngle'] = currentMap\n currentMap['tag'] = 'MOLE.LinkDihedralAngle'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00050'\n currentMap['toStr'] = basicDataTypes.Int.toString\n currentMap['cnvrt'] = basicDataTypes.Int.fromString\n\n # DataType MolType\n currentMap = {}\n abstractTypes['MolType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00055'] = currentMap\n loadMaps['MOLE.MolType'] = currentMap\n currentMap['tag'] = 'MOLE.MolType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00055'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType SecStrucCode\n currentMap = {}\n abstractTypes['SecStrucCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00053'] = currentMap\n loadMaps['MOLE.SecStrucCode'] = currentMap\n currentMap['tag'] = 'MOLE.SecStrucCode'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00053'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType SmilesType\n currentMap = {}\n abstractTypes['SmilesType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00051'] = currentMap\n loadMaps['MOLE.SmilesType'] = currentMap\n currentMap['tag'] = 'MOLE.SmilesType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00051'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class Alignment\n currentMap = {}\n abstractTypes['Alignment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:11:54_00001'] = currentMap\n loadMaps['MOLE.Alignment'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:11:54_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'alignments'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.molecule.Molecule.Alignment\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Alignment.alignLength\n currentMap = {}\n contentMap['alignLength'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00003'] = currentMap\n loadMaps['MOLE.Alignment.alignLength'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.alignLength'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00003'\n currentMap['name'] = 'alignLength'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Alignment.alignmentProgram\n currentMap = {}\n contentMap['alignmentProgram'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00005'] = currentMap\n loadMaps['MOLE.Alignment.alignmentProgram'] = currentMap\n currentMap['tag'] = 
'MOLE.Alignment.alignmentProgram'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00005'\n currentMap['name'] = 'alignmentProgram'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Alignment.alignmentScore\n currentMap = {}\n contentMap['alignmentScore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00004'] = currentMap\n loadMaps['MOLE.Alignment.alignmentScore'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.alignmentScore'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00004'\n currentMap['name'] = 'alignmentScore'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Alignment.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Alignment.dbRefAlignBegin\n currentMap = {}\n contentMap['dbRefAlignBegin'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00005'] = currentMap\n loadMaps['MOLE.Alignment.dbRefAlignBegin'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.dbRefAlignBegin'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00005'\n currentMap['name'] = 'dbRefAlignBegin'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Alignment.dbRefAlignEnd\n currentMap = {}\n contentMap['dbRefAlignEnd'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00006'] = currentMap\n loadMaps['MOLE.Alignment.dbRefAlignEnd'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.dbRefAlignEnd'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00006'\n currentMap['name'] = 'dbRefAlignEnd'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Alignment.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00006'] = currentMap\n loadMaps['MOLE.Alignment.details'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00006'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute Alignment.homologyRatio\n currentMap = {}\n contentMap['homologyRatio'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00004'] = currentMap\n loadMaps['MOLE.Alignment.homologyRatio'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.homologyRatio'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00004'\n currentMap['name'] = 'homologyRatio'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00058')\n\n # Attribute Alignment.nIdentical\n currentMap = {}\n contentMap['nIdentical'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00001'] = currentMap\n loadMaps['MOLE.Alignment.nIdentical'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.nIdentical'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00001'\n currentMap['name'] = 'nIdentical'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Alignment.nPositive\n currentMap = {}\n contentMap['nPositive'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00002'] = currentMap\n loadMaps['MOLE.Alignment.nPositive'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.nPositive'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00002'\n currentMap['name'] = 'nPositive'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Alignment.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00003'] = currentMap\n loadMaps['MOLE.Alignment.serial'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00003'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role Alignment.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Alignment.dbRef\n currentMap = {}\n contentMap['dbRef'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00002'] = currentMap\n loadMaps['MOLE.Alignment.dbRef'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.dbRef'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:11:59_00002'\n currentMap['name'] = 'dbRef'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('DBR').get('exolinks')\n\n # Role Alignment.molSeqFragment\n currentMap = {}\n contentMap['molSeqFragment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00007'] = currentMap\n loadMaps['MOLE.Alignment.molSeqFragment'] = currentMap\n currentMap['tag'] = 'MOLE.Alignment.molSeqFragment'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00007'\n currentMap['name'] = 'molSeqFragment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of Alignment\n\n currentMap = abstractTypes.get('Alignment')\n aList = ['alignLength', 'alignmentScore', 'dbRefAlignBegin', 'dbRefAlignEnd', 'homologyRatio', 'nIdentical', 'nPositive', 'serial']\n currentMap['headerAttrs'] = aList\n aList = ['alignmentProgram', 'details']\n currentMap['simpleAttrs'] = aList\n aList = ['molSeqFragment']\n currentMap['optLinks'] = aList\n aList = ['dbRef', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class MolResLink\n currentMap = {}\n abstractTypes['MolResLink'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00049'] = currentMap\n loadMaps['MOLE.MolResLink'] = currentMap\n currentMap['tag'] = 'MOLE.MolResLink'\n currentMap['type'] = 'class'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00049'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'molResLinks'\n currentMap['class'] = ccp.api.molecule.Molecule.MolResLink\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute MolResLink.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute MolResLink.dihedralAngle\n currentMap = {}\n contentMap['dihedralAngle'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00046'] = currentMap\n loadMaps['MOLE.MolResLink.dihedralAngle'] = currentMap\n currentMap['tag'] = 'MOLE.MolResLink.dihedralAngle'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00046'\n currentMap['name'] = 'dihedralAngle'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00050')\n\n # Role MolResLink.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role MolResLink.molResLinkEnds\n currentMap = {}\n contentMap['molResLinkEnds'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00045'] = currentMap\n loadMaps['MOLE.MolResLink.molResLinkEnds'] = currentMap\n currentMap['tag'] = 'MOLE.MolResLink.molResLinkEnds'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00045'\n currentMap['name'] = 'molResLinkEnds'\n currentMap['hicard'] = 2\n currentMap['locard'] = 2\n currentMap['copyOverride'] = False\n # End of MolResLink\n\n currentMap = abstractTypes.get('MolResLink')\n aList = ['dihedralAngle']\n currentMap['headerAttrs'] = aList\n aList = ['molResLinkEnds']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class MolResLinkEnd\n currentMap = {}\n abstractTypes['MolResLinkEnd'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00048'] = currentMap\n loadMaps['MOLE.MolResLinkEnd'] = currentMap\n currentMap['tag'] = 'MOLE.MolResLinkEnd'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00048'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'molResLinkEnds'\n currentMap['objkey'] = 'linkCode'\n currentMap['class'] = ccp.api.molecule.Molecule.MolResLinkEnd\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute MolResLinkEnd.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute MolResLinkEnd.linkCode\n currentMap = {}\n contentMap['linkCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00041'] = currentMap\n loadMaps['MOLE.MolResLinkEnd.linkCode'] = currentMap\n currentMap['tag'] = 'MOLE.MolResLinkEnd.linkCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00041'\n currentMap['name'] = 'linkCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role MolResLinkEnd.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role MolResLinkEnd.molResLink\n currentMap = {}\n contentMap['molResLink'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00044'] = currentMap\n loadMaps['MOLE.MolResLinkEnd.molResLink'] = currentMap\n currentMap['tag'] = 'MOLE.MolResLinkEnd.molResLink'\n 
currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00044'\n currentMap['name'] = 'molResLink'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n # End of MolResLinkEnd\n\n currentMap = abstractTypes.get('MolResLinkEnd')\n aList = ['linkCode']\n currentMap['headerAttrs'] = aList\n aList = ['molResLink']\n currentMap['optLinks'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class MolResidue\n currentMap = {}\n abstractTypes['MolResidue'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00044'] = currentMap\n loadMaps['MOLE.MolResidue'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00044'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'molResidues'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.molecule.Molecule.MolResidue\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute MolResidue.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute MolResidue.descriptor\n currentMap = {}\n contentMap['descriptor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00017'] = currentMap\n loadMaps['MOLE.MolResidue.descriptor'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.descriptor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00017'\n currentMap['name'] = 'descriptor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute MolResidue.linking\n currentMap = {}\n contentMap['linking'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00016'] = currentMap\n loadMaps['MOLE.MolResidue.linking'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.linking'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00016'\n currentMap['name'] = 'linking'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00025')\n\n # Attribute MolResidue.seqCode\n currentMap = {}\n contentMap['seqCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00012'] = currentMap\n loadMaps['MOLE.MolResidue.seqCode'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.seqCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00012'\n currentMap['name'] = 'seqCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute MolResidue.seqInsertCode\n currentMap = {}\n contentMap['seqInsertCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00013'] = currentMap\n loadMaps['MOLE.MolResidue.seqInsertCode'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.seqInsertCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00013'\n currentMap['name'] = 'seqInsertCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = ' '\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute 
MolResidue.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00011'] = currentMap\n loadMaps['MOLE.MolResidue.serial'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00011'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role MolResidue.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role MolResidue.chemComp\n currentMap = {}\n contentMap['chemComp'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00010'] = currentMap\n loadMaps['MOLE.MolResidue.chemComp'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.chemComp'\n currentMap['type'] = 'exotop'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00010'\n currentMap['name'] = 'chemComp'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CHEM').get('exolinks')\n\n # Role MolResidue.chemCompVar\n currentMap = {}\n contentMap['chemCompVar'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00007'] = currentMap\n loadMaps['MOLE.MolResidue.chemCompVar'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.chemCompVar'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00007'\n currentMap['name'] = 'chemCompVar'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CHEM').get('exolinks')\n\n # Role MolResidue.molResLinkEnds\n currentMap = {}\n contentMap['molResLinkEnds'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00009'] = currentMap\n loadMaps['MOLE.MolResidue.molResLinkEnds'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.molResLinkEnds'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00009'\n currentMap['name'] = 'molResLinkEnds'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('MOLE').get('abstractTypes')\n\n # Role MolResidue.molSeqFragment\n currentMap = {}\n contentMap['molSeqFragment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00001'] = currentMap\n loadMaps['MOLE.MolResidue.molSeqFragment'] = currentMap\n currentMap['tag'] = 'MOLE.MolResidue.molSeqFragment'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00001'\n currentMap['name'] = 'molSeqFragment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n # End of MolResidue\n\n currentMap = abstractTypes.get('MolResidue')\n aList = ['linking', 'seqCode', 'serial']\n currentMap['headerAttrs'] = aList\n aList = ['descriptor', 'seqInsertCode']\n currentMap['simpleAttrs'] = aList\n aList = ['molSeqFragment']\n currentMap['optLinks'] = aList\n aList = ['molResLinkEnds', 'chemComp', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['molResLinkEnds']\n currentMap['children'] = aList\n\n # Class MolSeqFragment\n currentMap = {}\n abstractTypes['MolSeqFragment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00045'] = currentMap\n 
loadMaps['MOLE.MolSeqFragment'] = currentMap\n currentMap['tag'] = 'MOLE.MolSeqFragment'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00045'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'molSeqFragments'\n currentMap['class'] = ccp.api.molecule.Molecule.MolSeqFragment\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute MolSeqFragment.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Role MolSeqFragment.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role MolSeqFragment.alignments\n currentMap = {}\n contentMap['alignments'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00008'] = currentMap\n loadMaps['MOLE.MolSeqFragment.alignments'] = currentMap\n currentMap['tag'] = 'MOLE.MolSeqFragment.alignments'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:39_00008'\n currentMap['name'] = 'alignments'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role MolSeqFragment.limitResidues\n currentMap = {}\n contentMap['limitResidues'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00063'] = currentMap\n loadMaps['MOLE.MolSeqFragment.limitResidues'] = currentMap\n currentMap['tag'] = 'MOLE.MolSeqFragment.limitResidues'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00063'\n currentMap['name'] = 'limitResidues'\n currentMap['hicard'] = 2\n currentMap['locard'] = 2\n currentMap['copyOverride'] = False\n\n # Role MolSeqFragment.naturalSource\n currentMap = {}\n contentMap['naturalSource'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00029'] = currentMap\n loadMaps['MOLE.MolSeqFragment.naturalSource'] = currentMap\n currentMap['tag'] = 'MOLE.MolSeqFragment.naturalSource'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:35_00029'\n currentMap['name'] = 'naturalSource'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('TAXO').get('exolinks')\n # End of MolSeqFragment\n\n currentMap = abstractTypes.get('MolSeqFragment')\n aList = ['alignments', 'limitResidues']\n currentMap['simpleAttrs'] = aList\n aList = ['naturalSource', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Molecule\n currentMap = {}\n abstractTypes['Molecule'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00039'] = currentMap\n loadMaps['MOLE.Molecule'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00039'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'molecules'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.Molecule.Molecule\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Molecule.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Molecule.calcIsoelectricPoint\n currentMap = {}\n contentMap['calcIsoelectricPoint'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00018'] = currentMap\n loadMaps['MOLE.Molecule.calcIsoelectricPoint'] = currentMap\n 
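  # Note: each generated map is registered twice, under its guid in mapsByGuid and under its dotted tag (here 'MOLE.Molecule.calcIsoelectricPoint') in loadMaps, before the remaining fields of the map are filled in.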
currentMap['tag'] = 'MOLE.Molecule.calcIsoelectricPoint'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00018'\n currentMap['name'] = 'calcIsoelectricPoint'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Molecule.commonNames\n currentMap = {}\n contentMap['commonNames'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00014'] = currentMap\n loadMaps['MOLE.Molecule.commonNames'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.commonNames'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00014'\n currentMap['name'] = 'commonNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Molecule.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Molecule.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00026'] = currentMap\n loadMaps['MOLE.Molecule.details'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00026'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute Molecule.fragmentDetails\n currentMap = {}\n contentMap['fragmentDetails'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2009-01-19-14:20:59_00001'] = currentMap\n loadMaps['MOLE.Molecule.fragmentDetails'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.fragmentDetails'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2009-01-19-14:20:59_00001'\n currentMap['name'] = 'fragmentDetails'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Molecule.functions\n currentMap = {}\n contentMap['functions'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00016'] = currentMap\n loadMaps['MOLE.Molecule.functions'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.functions'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00016'\n currentMap['name'] = 'functions'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Molecule.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute Molecule.isFinalised\n currentMap = {}\n contentMap['isFinalised'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00012'] = currentMap\n loadMaps['MOLE.Molecule.isFinalised'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.isFinalised'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00012'\n currentMap['name'] = 'isFinalised'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n 
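  # 'default' presumably gives the value assumed when this attribute (Molecule.isFinalised) is absent from the stored XML; the 'data' entry that follows points at the map describing the attribute's value type.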
currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Molecule.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Molecule.keywords\n currentMap = {}\n contentMap['keywords'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00015'] = currentMap\n loadMaps['MOLE.Molecule.keywords'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.keywords'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00015'\n currentMap['name'] = 'keywords'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Molecule.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Molecule.longName\n currentMap = {}\n contentMap['longName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00011'] = currentMap\n loadMaps['MOLE.Molecule.longName'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.longName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00011'\n currentMap['name'] = 'longName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Molecule.mutationDetails\n currentMap = {}\n contentMap['mutationDetails'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2009-01-19-14:20:59_00002'] = currentMap\n loadMaps['MOLE.Molecule.mutationDetails'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.mutationDetails'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2009-01-19-14:20:59_00002'\n currentMap['name'] = 'mutationDetails'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Molecule.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00010'] = currentMap\n loadMaps['MOLE.Molecule.name'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00010'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Molecule.seqDetails\n currentMap = {}\n contentMap['seqDetails'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00027'] = currentMap\n loadMaps['MOLE.Molecule.seqDetails'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.seqDetails'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00027'\n currentMap['name'] = 'seqDetails'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Molecule.smiles\n currentMap = {}\n contentMap['smiles'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00024'] = currentMap\n loadMaps['MOLE.Molecule.smiles'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.smiles'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00024'\n currentMap['name'] = 'smiles'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute Molecule.smilesType\n currentMap = {}\n contentMap['smilesType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00025'] = currentMap\n loadMaps['MOLE.Molecule.smilesType'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.smilesType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00025'\n currentMap['name'] = 'smilesType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00051')\n\n # Role Molecule.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Molecule.alignments\n currentMap = {}\n contentMap['alignments'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-26-10:15:40_00002'] = currentMap\n loadMaps['MOLE.Molecule.alignments'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.alignments'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:15:40_00002'\n currentMap['name'] = 'alignments'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('MOLE').get('abstractTypes')\n\n # Role Molecule.citations\n currentMap = {}\n contentMap['citations'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:33_00051'] = currentMap\n loadMaps['MOLE.Molecule.citations'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.citations'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:33_00051'\n currentMap['name'] = 'citations'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CITA').get('exolinks')\n\n # Role Molecule.molResLinks\n currentMap = {}\n contentMap['molResLinks'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00007'] = currentMap\n loadMaps['MOLE.Molecule.molResLinks'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.molResLinks'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00007'\n currentMap['name'] = 'molResLinks'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('MOLE').get('abstractTypes')\n\n # Role Molecule.molResidues\n currentMap = {}\n contentMap['molResidues'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:33_00047'] = currentMap\n loadMaps['MOLE.Molecule.molResidues'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.molResidues'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:33_00047'\n currentMap['name'] = 'molResidues'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = 
globalMap.get('MOLE').get('abstractTypes')\n\n # Role Molecule.molSeqFragments\n currentMap = {}\n contentMap['molSeqFragments'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00009'] = currentMap\n loadMaps['MOLE.Molecule.molSeqFragments'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.molSeqFragments'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00009'\n currentMap['name'] = 'molSeqFragments'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('MOLE').get('abstractTypes')\n\n # Role Molecule.moleculeSysNames\n currentMap = {}\n contentMap['moleculeSysNames'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:33_00049'] = currentMap\n loadMaps['MOLE.Molecule.moleculeSysNames'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.moleculeSysNames'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:33_00049'\n currentMap['name'] = 'moleculeSysNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('MOLE').get('abstractTypes')\n\n # Role Molecule.naturalSource\n currentMap = {}\n contentMap['naturalSource'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00002'] = currentMap\n loadMaps['MOLE.Molecule.naturalSource'] = currentMap\n currentMap['tag'] = 'MOLE.Molecule.naturalSource'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00002'\n currentMap['name'] = 'naturalSource'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('TAXO').get('exolinks')\n # End of Molecule\n\n currentMap = abstractTypes.get('Molecule')\n aList = ['calcIsoelectricPoint', 'createdBy', 'guid', 'isFinalised', 'isModifiable', 'lastUnlockedBy', 'smilesType']\n currentMap['headerAttrs'] = aList\n aList = ['commonNames', 'details', 'fragmentDetails', 'functions', 'keywords', 'longName', 'mutationDetails', 'name', 'seqDetails', 'smiles']\n currentMap['simpleAttrs'] = aList\n aList = ['moleculeSysNames', 'molSeqFragments', 'molResidues', 'molResLinks', 'alignments', 'naturalSource', 'citations', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['alignments', 'molResLinks', 'molResidues', 'molSeqFragments', 'moleculeSysNames']\n currentMap['children'] = aList\n\n # Class MoleculeSysName\n currentMap = {}\n abstractTypes['MoleculeSysName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00043'] = currentMap\n loadMaps['MOLE.MoleculeSysName'] = currentMap\n currentMap['tag'] = 'MOLE.MoleculeSysName'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00043'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'moleculeSysNames'\n currentMap['objkey'] = 'namingSystem'\n currentMap['class'] = ccp.api.molecule.Molecule.MoleculeSysName\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute MoleculeSysName.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute MoleculeSysName.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00061'] = currentMap\n 
loadMaps['MOLE.MoleculeSysName.name'] = currentMap\n currentMap['tag'] = 'MOLE.MoleculeSysName.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00061'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute MoleculeSysName.namingSystem\n currentMap = {}\n contentMap['namingSystem'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00060'] = currentMap\n loadMaps['MOLE.MoleculeSysName.namingSystem'] = currentMap\n currentMap['tag'] = 'MOLE.MoleculeSysName.namingSystem'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:34_00060'\n currentMap['name'] = 'namingSystem'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role MoleculeSysName.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of MoleculeSysName\n\n currentMap = abstractTypes.get('MoleculeSysName')\n aList = ['namingSystem']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to Alignment\n currentMap = {}\n exolinks['Alignment'] = currentMap\n loadMaps['MOLE.exo-Alignment'] = currentMap\n currentMap['tag'] = 'MOLE.exo-Alignment'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-26-10:11:54_00001'\n currentMap['name'] = 'Alignment'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.Molecule.Alignment\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to MolResLink\n currentMap = {}\n exolinks['MolResLink'] = currentMap\n loadMaps['MOLE.exo-MolResLink'] = currentMap\n currentMap['tag'] = 'MOLE.exo-MolResLink'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00049'\n currentMap['name'] = 'MolResLink'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.Molecule.MolResLink\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(globalMap.get('MOLE').get('exolinks'))\n aList.append(globalMap.get('MOLE').get('exolinks'))\n\n # Out-of-package link to MolResLinkEnd\n currentMap = {}\n exolinks['MolResLinkEnd'] = currentMap\n loadMaps['MOLE.exo-MolResLinkEnd'] = currentMap\n currentMap['tag'] = 'MOLE.exo-MolResLinkEnd'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00048'\n currentMap['name'] = 'MolResLinkEnd'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.Molecule.MolResLinkEnd\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to MolResidue\n currentMap = {}\n exolinks['MolResidue'] = currentMap\n loadMaps['MOLE.exo-MolResidue'] = currentMap\n currentMap['tag'] = 
'MOLE.exo-MolResidue'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00044'\n currentMap['name'] = 'MolResidue'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.Molecule.MolResidue\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to MolSeqFragment\n currentMap = {}\n exolinks['MolSeqFragment'] = currentMap\n loadMaps['MOLE.exo-MolSeqFragment'] = currentMap\n currentMap['tag'] = 'MOLE.exo-MolSeqFragment'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00045'\n currentMap['name'] = 'MolSeqFragment'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.Molecule.MolSeqFragment\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(globalMap.get('MOLE').get('exolinks'))\n aList.append(globalMap.get('MOLE').get('exolinks'))\n\n # Out-of-package link to Molecule\n currentMap = {}\n exolinks['Molecule'] = currentMap\n loadMaps['MOLE.exo-Molecule'] = currentMap\n currentMap['tag'] = 'MOLE.exo-Molecule'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00039'\n currentMap['name'] = 'Molecule'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.Molecule.Molecule\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to MoleculeSysName\n currentMap = {}\n exolinks['MoleculeSysName'] = currentMap\n loadMaps['MOLE.exo-MoleculeSysName'] = currentMap\n currentMap['tag'] = 'MOLE.exo-MoleculeSysName'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00043'\n currentMap['name'] = 'MoleculeSysName'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.Molecule.MoleculeSysName\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))", "def generate_antonym_pairs(config: SettingConfig) -> dict:\n print(f\"Generating initial antonym pairs from RoWordNet @ {datetime.now()}\")\n wn = rwn.RoWordNet()\n\n # Create the output dictionary that will be of type dict(str : set(pair(str, str)) where the key is\n # the PoS and the value is a set of pairs of words of PoS specified by the key\n pairs = dict()\n\n # Iterate over the selected parts of speech\n for part_of_speech in config.pos.values():\n\n pos_pairs = list()\n\n # Return all synsets corresponding to the PoS\n synset_ids = wn.synsets(pos=part_of_speech)\n\n # Iterate all the synsets for the current PoS\n for synset_id in synset_ids:\n\n # Get the synset object specified by synset_id\n synset = wn.synset(synset_id)\n\n # Get the outbound relations of type antonym from\n outbound_relations = filter(lambda x: x[1] == 'near_antonym', wn.outbound_relations(synset_id))\n\n # Iterate outbound relations\n for relation in outbound_relations:\n # Get the synset corresponding to the target of the outbound relation\n target_synset = wn.synset(relation[0])\n\n # Get all the pairs, sort them by first word to keep set entries unique\n current_iteration_pairs = get_cross_synset_pairs(synset, target_synset)\n\n 
# Add the current set of pairs\n pos_pairs.extend(current_iteration_pairs)\n\n # Get corresponding key in pos dictionary and add the pair to the resulting dictionary\n for key, value in config.pos.items():\n if value == part_of_speech:\n pairs[key] = unique(pos_pairs)\n\n # Return the whole dictionary\n print(f\"Successfully generated antonym paris @ {datetime.now()}\")\n return pairs", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANAP').get('abstractTypes')\n exolinks = globalMap.get('ANAP').get('exolinks')\n\n # DataType GraphicsHandlerType\n currentMap = {}\n abstractTypes['GraphicsHandlerType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'] = currentMap\n loadMaps['ANAP.GraphicsHandlerType'] = currentMap\n currentMap['tag'] = 'ANAP.GraphicsHandlerType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnalysisProfile\n currentMap = {}\n abstractTypes['AnalysisProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'analysisProfiles'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnalysisProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnalysisProfile.bgColor\n currentMap = {}\n contentMap['bgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'] = currentMap\n loadMaps['ANAP.AnalysisProfile.bgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.bgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'\n currentMap['name'] = 'bgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#FFFFFF'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.fgColor\n currentMap = {}\n contentMap['fgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'] = currentMap\n loadMaps['ANAP.AnalysisProfile.fgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.fgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'\n currentMap['name'] = 'fgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#000000'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.font\n currentMap = {}\n contentMap['font'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'] = 
currentMap\n loadMaps['ANAP.AnalysisProfile.font'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.font'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'\n currentMap['name'] = 'font'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.graphicsHandler\n currentMap = {}\n contentMap['graphicsHandler'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'] = currentMap\n loadMaps['ANAP.AnalysisProfile.graphicsHandler'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.graphicsHandler'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'\n currentMap['name'] = 'graphicsHandler'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'Tk'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001')\n\n # Attribute AnalysisProfile.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnalysisProfile.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'] = currentMap\n loadMaps['ANAP.AnalysisProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AnalysisProfile.panView\n currentMap = {}\n contentMap['panView'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'] = currentMap\n loadMaps['ANAP.AnalysisProfile.panView'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.panView'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'\n currentMap['name'] = 'panView'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.sendBugReports\n currentMap = {}\n contentMap['sendBugReports'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile.sendBugReports'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.sendBugReports'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'\n currentMap['name'] = 'sendBugReports'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'maybe'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2010-11-17-16:21:33_00001')\n\n # Attribute AnalysisProfile.transientDialogs\n currentMap = {}\n contentMap['transientDialogs'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientDialogs'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientDialogs'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'\n currentMap['name'] = 'transientDialogs'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.transientWindows\n currentMap = {}\n contentMap['transientWindows'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientWindows'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientWindows'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'\n currentMap['name'] = 'transientWindows'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.twoCharShortcuts\n currentMap = {}\n contentMap['twoCharShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'] = currentMap\n loadMaps['ANAP.AnalysisProfile.twoCharShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.twoCharShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'\n currentMap['name'] = 'twoCharShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useCrosshair\n currentMap = {}\n contentMap['useCrosshair'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useCrosshair'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useCrosshair'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'\n currentMap['name'] = 'useCrosshair'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useGlobalShortcuts\n currentMap = {}\n contentMap['useGlobalShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useGlobalShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useGlobalShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'\n currentMap['name'] = 'useGlobalShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.userEmail\n currentMap = {}\n contentMap['userEmail'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userEmail'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userEmail'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'\n currentMap['name'] = 'userEmail'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute AnalysisProfile.userName\n currentMap = {}\n contentMap['userName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userName'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'\n currentMap['name'] = 'userName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.userOrganisation\n currentMap = {}\n contentMap['userOrganisation'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userOrganisation'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userOrganisation'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'\n currentMap['name'] = 'userOrganisation'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.webBrowser\n currentMap = {}\n contentMap['webBrowser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'] = currentMap\n loadMaps['ANAP.AnalysisProfile.webBrowser'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.webBrowser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'\n currentMap['name'] = 'webBrowser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role AnalysisProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnalysisProfile.colorSchemes\n currentMap = {}\n contentMap['colorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'] = currentMap\n loadMaps['ANAP.AnalysisProfile.colorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.colorSchemes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'\n currentMap['name'] = 'colorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.macros\n currentMap = {}\n contentMap['macros'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'] = currentMap\n loadMaps['ANAP.AnalysisProfile.macros'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.macros'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'\n currentMap['name'] = 'macros'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.marksColor\n currentMap = {}\n contentMap['marksColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'] = currentMap\n loadMaps['ANAP.AnalysisProfile.marksColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.marksColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'\n currentMap['name'] = 'marksColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n\n # Role AnalysisProfile.refExpProfiles\n currentMap = {}\n contentMap['refExpProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'] = currentMap\n loadMaps['ANAP.AnalysisProfile.refExpProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.refExpProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'\n currentMap['name'] = 'refExpProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.residueProfiles\n currentMap = {}\n contentMap['residueProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'] = currentMap\n loadMaps['ANAP.AnalysisProfile.residueProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.residueProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'\n currentMap['name'] = 'residueProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.rulersColor\n currentMap = {}\n contentMap['rulersColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'] = currentMap\n loadMaps['ANAP.AnalysisProfile.rulersColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.rulersColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'\n currentMap['name'] = 'rulersColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n # End of AnalysisProfile\n\n currentMap = abstractTypes.get('AnalysisProfile')\n aList = ['createdBy', 'graphicsHandler', 'guid', 'isModifiable', 'lastUnlockedBy', 'name', 'panView', 'sendBugReports', 'transientDialogs', 'transientWindows', 'twoCharShortcuts', 'useCrosshair', 'useGlobalShortcuts', 'userEmail', 'webBrowser']\n currentMap['headerAttrs'] = aList\n aList = ['bgColor', 'fgColor', 'font', 'userName', 'userOrganisation', 'marksColor', 'rulersColor']\n currentMap['simpleAttrs'] = aList\n aList = ['residueProfiles', 'refExpProfiles', 'macros', 'colorSchemes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['colorSchemes', 'macros', 'refExpProfiles', 'residueProfiles']\n currentMap['children'] = aList\n\n # Class ColorScheme\n currentMap = {}\n abstractTypes['ColorScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'] = currentMap\n loadMaps['ANAP.ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'colorSchemes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ColorScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ColorScheme.colors\n currentMap = {}\n 
contentMap['colors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'] = currentMap\n loadMaps['ANAP.ColorScheme.colors'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.colors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'\n currentMap['name'] = 'colors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute ColorScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'] = currentMap\n loadMaps['ANAP.ColorScheme.name'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ColorScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ColorScheme\n\n currentMap = abstractTypes.get('ColorScheme')\n aList = ['colors', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Macro\n currentMap = {}\n abstractTypes['Macro'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'] = currentMap\n loadMaps['ANAP.Macro'] = currentMap\n currentMap['tag'] = 'ANAP.Macro'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'macros'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Macro.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Macro.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'] = currentMap\n loadMaps['ANAP.Macro.details'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Macro.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'] = currentMap\n loadMaps['ANAP.Macro.function'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.isInMenu\n currentMap = {}\n contentMap['isInMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'] = currentMap\n loadMaps['ANAP.Macro.isInMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'\n currentMap['name'] = 'isInMenu'\n 
currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.isInMouseMenu\n currentMap = {}\n contentMap['isInMouseMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'] = currentMap\n loadMaps['ANAP.Macro.isInMouseMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMouseMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'\n currentMap['name'] = 'isInMouseMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.module\n currentMap = {}\n contentMap['module'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'] = currentMap\n loadMaps['ANAP.Macro.module'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.module'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'\n currentMap['name'] = 'module'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'] = currentMap\n loadMaps['ANAP.Macro.name'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Macro.ordering\n currentMap = {}\n contentMap['ordering'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'] = currentMap\n loadMaps['ANAP.Macro.ordering'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.ordering'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'\n currentMap['name'] = 'ordering'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.path\n currentMap = {}\n contentMap['path'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'] = currentMap\n loadMaps['ANAP.Macro.path'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.path'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'\n currentMap['name'] = 'path'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00003')\n\n # Attribute Macro.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'] = currentMap\n loadMaps['ANAP.Macro.serial'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.shortcut\n currentMap = {}\n contentMap['shortcut'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'] = currentMap\n loadMaps['ANAP.Macro.shortcut'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.shortcut'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'\n currentMap['name'] = 'shortcut'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Macro.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Macro\n\n currentMap = abstractTypes.get('Macro')\n aList = ['function', 'isInMenu', 'isInMouseMenu', 'module', 'ordering', 'serial', 'shortcut']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'path']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefExpProfile\n currentMap = {}\n abstractTypes['RefExpProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'] = currentMap\n loadMaps['ANAP.RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refExpProfiles'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefExpProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefExpProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'] = currentMap\n loadMaps['ANAP.RefExpProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute RefExpProfile.peakSymbolColors\n currentMap = {}\n contentMap['peakSymbolColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakSymbolColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakSymbolColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'\n currentMap['name'] = 'peakSymbolColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.peakTextColors\n currentMap = {}\n contentMap['peakTextColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakTextColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakTextColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'\n currentMap['name'] = 'peakTextColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.refExpNames\n currentMap = {}\n contentMap['refExpNames'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'] = currentMap\n loadMaps['ANAP.RefExpProfile.refExpNames'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.refExpNames'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'\n currentMap['name'] = 'refExpNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role RefExpProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefExpProfile.negColorSchemes\n currentMap = {}\n contentMap['negColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'] = currentMap\n loadMaps['ANAP.RefExpProfile.negColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.negColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'\n currentMap['name'] = 'negColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role RefExpProfile.posColorSchemes\n currentMap = {}\n contentMap['posColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'] = currentMap\n loadMaps['ANAP.RefExpProfile.posColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.posColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'\n currentMap['name'] = 'posColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of RefExpProfile\n\n currentMap = abstractTypes.get('RefExpProfile')\n aList = ['name']\n currentMap['headerAttrs'] = aList\n aList = ['peakSymbolColors', 'peakTextColors', 'refExpNames', 'negColorSchemes', 'posColorSchemes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ResidueProfile\n currentMap = {}\n abstractTypes['ResidueProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'] = currentMap\n loadMaps['ANAP.ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'residueProfiles'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ResidueProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ResidueProfile.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'] = currentMap\n loadMaps['ANAP.ResidueProfile.ccpCode'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.guiName\n currentMap = {}\n contentMap['guiName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'] = currentMap\n loadMaps['ANAP.ResidueProfile.guiName'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.guiName'\n currentMap['type'] = 
'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'\n currentMap['name'] = 'guiName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'] = currentMap\n loadMaps['ANAP.ResidueProfile.molType'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ResidueProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ResidueProfile\n\n currentMap = abstractTypes.get('ResidueProfile')\n aList = ['ccpCode', 'guiName', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnalysisProfile\n currentMap = {}\n exolinks['AnalysisProfile'] = currentMap\n loadMaps['ANAP.exo-AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-AnalysisProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['name'] = 'AnalysisProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to ColorScheme\n currentMap = {}\n exolinks['ColorScheme'] = currentMap\n loadMaps['ANAP.exo-ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ColorScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['name'] = 'ColorScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Macro\n currentMap = {}\n exolinks['Macro'] = currentMap\n loadMaps['ANAP.exo-Macro'] = currentMap\n currentMap['tag'] = 'ANAP.exo-Macro'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['name'] = 'Macro'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to RefExpProfile\n currentMap = {}\n exolinks['RefExpProfile'] = currentMap\n loadMaps['ANAP.exo-RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-RefExpProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['name'] = 'RefExpProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n 
aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ResidueProfile\n currentMap = {}\n exolinks['ResidueProfile'] = currentMap\n loadMaps['ANAP.exo-ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ResidueProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['name'] = 'ResidueProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))", "def _parse(self, file):\n self.psl = {}\n for line in file.readlines():\n line = line.decode(\"utf-8\")\n line = re.sub(r\"//.*\", \"\", line)\n line = re.sub(r\"\\s.*\", \"\", line)\n if len(line) > 0:\n exc = False\n if line[0] == \"!\":\n exc = True\n line = line[1:]\n\n parts = [(re.match(r\"^[\\x20-\\x7e]+$\", part) and part)\n\t or \"xn--%s\" % part.encode(\"punycode\")\n\t for part in line.split(\".\")[::-1]]\n\n d = self.psl\n for i in xrange(len(parts)):\n\t part = parts[i].encode(\"utf-8\")\n\t if i < len(parts)-1:\n\t if part not in d or not isinstance(d[part], dict):\n\t d[part] = {}\n\t d = d[part]\n\t else:\n\t d[part] = exc", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('AFFI').get('abstractTypes')\n exolinks = globalMap.get('AFFI').get('exolinks')\n\n # DataType FamilyTitle\n currentMap = {}\n abstractTypes['FamilyTitle'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00021'] = currentMap\n loadMaps['AFFI.FamilyTitle'] = currentMap\n currentMap['tag'] = 'AFFI.FamilyTitle'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00021'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType PersonTitle\n currentMap = {}\n abstractTypes['PersonTitle'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00020'] = currentMap\n loadMaps['AFFI.PersonTitle'] = currentMap\n currentMap['tag'] = 'AFFI.PersonTitle'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00020'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AffiliationStore\n currentMap = {}\n abstractTypes['AffiliationStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00006'] = currentMap\n loadMaps['AFFI.AffiliationStore'] = currentMap\n currentMap['tag'] = 'AFFI.AffiliationStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'affiliationStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.general.Affiliation.AffiliationStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AffiliationStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AffiliationStore.createdBy\n contentMap['createdBy'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AffiliationStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AffiliationStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AffiliationStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AffiliationStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00009'] = currentMap\n loadMaps['AFFI.AffiliationStore.name'] = currentMap\n currentMap['tag'] = 'AFFI.AffiliationStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00009'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AffiliationStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AffiliationStore.organisations\n currentMap = {}\n contentMap['organisations'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00006'] = currentMap\n loadMaps['AFFI.AffiliationStore.organisations'] = currentMap\n currentMap['tag'] = 'AFFI.AffiliationStore.organisations'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00006'\n currentMap['name'] = 'organisations'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('AFFI').get('abstractTypes')\n\n # Role AffiliationStore.persons\n currentMap = {}\n contentMap['persons'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00008'] = currentMap\n loadMaps['AFFI.AffiliationStore.persons'] = currentMap\n currentMap['tag'] = 'AFFI.AffiliationStore.persons'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00008'\n currentMap['name'] = 'persons'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('AFFI').get('abstractTypes')\n # End of AffiliationStore\n\n currentMap = abstractTypes.get('AffiliationStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['persons', 'organisations', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['organisations', 'persons']\n currentMap['children'] = aList\n\n # Class Group\n currentMap = {}\n abstractTypes['Group'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00023'] = currentMap\n loadMaps['AFFI.Group'] = currentMap\n currentMap['tag'] = 'AFFI.Group'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00023'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'groups'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = 
ccp.api.general.Affiliation.Group\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Group.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Group.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00003'] = currentMap\n loadMaps['AFFI.Group.name'] = currentMap\n currentMap['tag'] = 'AFFI.Group.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Group.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00002'] = currentMap\n loadMaps['AFFI.Group.serial'] = currentMap\n currentMap['tag'] = 'AFFI.Group.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00002'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Group.url\n currentMap = {}\n contentMap['url'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00004'] = currentMap\n loadMaps['AFFI.Group.url'] = currentMap\n currentMap['tag'] = 'AFFI.Group.url'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:15_00004'\n currentMap['name'] = 'url'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role Group.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Group.personInGroups\n currentMap = {}\n contentMap['personInGroups'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00041'] = currentMap\n loadMaps['AFFI.Group.personInGroups'] = currentMap\n currentMap['tag'] = 'AFFI.Group.personInGroups'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00041'\n currentMap['name'] = 'personInGroups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n # End of Group\n\n currentMap = abstractTypes.get('Group')\n aList = ['serial']\n currentMap['headerAttrs'] = aList\n aList = ['name', 'url', 'personInGroups']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Organisation\n currentMap = {}\n abstractTypes['Organisation'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00019'] = currentMap\n loadMaps['AFFI.Organisation'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00019'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'organisations'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.general.Affiliation.Organisation\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Organisation.addresses\n currentMap = {}\n contentMap['addresses'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00024'] = currentMap\n loadMaps['AFFI.Organisation.addresses'] = currentMap\n currentMap['tag'] = 
'AFFI.Organisation.addresses'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00024'\n currentMap['name'] = 'addresses'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Organisation.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Organisation.city\n currentMap = {}\n contentMap['city'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00025'] = currentMap\n loadMaps['AFFI.Organisation.city'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.city'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00025'\n currentMap['name'] = 'city'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.country\n currentMap = {}\n contentMap['country'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00027'] = currentMap\n loadMaps['AFFI.Organisation.country'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.country'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00027'\n currentMap['name'] = 'country'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.emailAddress\n currentMap = {}\n contentMap['emailAddress'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00030'] = currentMap\n loadMaps['AFFI.Organisation.emailAddress'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.emailAddress'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00030'\n currentMap['name'] = 'emailAddress'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.faxNumber\n currentMap = {}\n contentMap['faxNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00029'] = currentMap\n loadMaps['AFFI.Organisation.faxNumber'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.faxNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00029'\n currentMap['name'] = 'faxNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00022'] = currentMap\n loadMaps['AFFI.Organisation.name'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00022'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.organisationType\n currentMap = {}\n contentMap['organisationType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00023'] = currentMap\n 
loadMaps['AFFI.Organisation.organisationType'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.organisationType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00023'\n currentMap['name'] = 'organisationType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.phoneNumber\n currentMap = {}\n contentMap['phoneNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00028'] = currentMap\n loadMaps['AFFI.Organisation.phoneNumber'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.phoneNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00028'\n currentMap['name'] = 'phoneNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.postalCode\n currentMap = {}\n contentMap['postalCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00026'] = currentMap\n loadMaps['AFFI.Organisation.postalCode'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.postalCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00026'\n currentMap['name'] = 'postalCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.province\n currentMap = {}\n contentMap['province'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2009-01-19-14:21:00_00001'] = currentMap\n loadMaps['AFFI.Organisation.province'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.province'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2009-01-19-14:21:00_00001'\n currentMap['name'] = 'province'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Organisation.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00021'] = currentMap\n loadMaps['AFFI.Organisation.serial'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00021'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Organisation.url\n currentMap = {}\n contentMap['url'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00031'] = currentMap\n loadMaps['AFFI.Organisation.url'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.url'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00031'\n currentMap['name'] = 'url'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role Organisation.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Organisation.groups\n currentMap = {}\n contentMap['groups'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00020'] = currentMap\n loadMaps['AFFI.Organisation.groups'] = currentMap\n currentMap['tag'] = 'AFFI.Organisation.groups'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00020'\n currentMap['name'] = 'groups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('AFFI').get('abstractTypes')\n # End of Organisation\n\n currentMap = abstractTypes.get('Organisation')\n aList = ['serial']\n currentMap['headerAttrs'] = aList\n aList = ['addresses', 'city', 'country', 'emailAddress', 'faxNumber', 'name', 'organisationType', 'phoneNumber', 'postalCode', 'province', 'url']\n currentMap['simpleAttrs'] = aList\n aList = ['groups', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['groups']\n currentMap['children'] = aList\n\n # Class Person\n currentMap = {}\n abstractTypes['Person'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00018'] = currentMap\n loadMaps['AFFI.Person'] = currentMap\n currentMap['tag'] = 'AFFI.Person'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'persons'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.general.Affiliation.Person\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Person.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Person.familyName\n currentMap = {}\n contentMap['familyName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00011'] = currentMap\n loadMaps['AFFI.Person.familyName'] = currentMap\n currentMap['tag'] = 'AFFI.Person.familyName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00011'\n currentMap['name'] = 'familyName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Person.familyTitle\n currentMap = {}\n contentMap['familyTitle'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00015'] = currentMap\n loadMaps['AFFI.Person.familyTitle'] = currentMap\n currentMap['tag'] = 'AFFI.Person.familyTitle'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00015'\n currentMap['name'] = 'familyTitle'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00021')\n\n # Attribute Person.givenName\n currentMap = {}\n contentMap['givenName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00012'] = currentMap\n loadMaps['AFFI.Person.givenName'] = currentMap\n currentMap['tag'] = 'AFFI.Person.givenName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00012'\n currentMap['name'] = 'givenName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Person.middleInitials\n currentMap = {}\n contentMap['middleInitials'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00014'] = currentMap\n loadMaps['AFFI.Person.middleInitials'] = currentMap\n currentMap['tag'] = 
'AFFI.Person.middleInitials'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00014'\n currentMap['name'] = 'middleInitials'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Person.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00010'] = currentMap\n loadMaps['AFFI.Person.serial'] = currentMap\n currentMap['tag'] = 'AFFI.Person.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00010'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Person.title\n currentMap = {}\n contentMap['title'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00016'] = currentMap\n loadMaps['AFFI.Person.title'] = currentMap\n currentMap['tag'] = 'AFFI.Person.title'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00016'\n currentMap['name'] = 'title'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00020')\n\n # Role Person.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Person.currentPersonInGroup\n currentMap = {}\n contentMap['currentPersonInGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-25-12:14:19_00001'] = currentMap\n loadMaps['AFFI.Person.currentPersonInGroup'] = currentMap\n currentMap['tag'] = 'AFFI.Person.currentPersonInGroup'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-25-12:14:19_00001'\n currentMap['name'] = 'currentPersonInGroup'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role Person.personInGroups\n currentMap = {}\n contentMap['personInGroups'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00005'] = currentMap\n loadMaps['AFFI.Person.personInGroups'] = currentMap\n currentMap['tag'] = 'AFFI.Person.personInGroups'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00005'\n currentMap['name'] = 'personInGroups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('AFFI').get('abstractTypes')\n\n # Role Person.users\n currentMap = {}\n contentMap['users'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00009'] = currentMap\n loadMaps['AFFI.Person.users'] = currentMap\n currentMap['tag'] = 'AFFI.Person.users'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00009'\n currentMap['name'] = 'users'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = False\n currentMap['content'] = globalMap.get('ACCO').get('exolinks')\n # End of Person\n\n currentMap = abstractTypes.get('Person')\n aList = ['familyTitle', 'serial']\n currentMap['headerAttrs'] = aList\n aList = ['familyName', 'givenName', 'middleInitials', 'title', 'currentPersonInGroup']\n currentMap['simpleAttrs'] = aList\n aList = ['personInGroups', 'users', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = 
['personInGroups']\n currentMap['children'] = aList\n\n # Class PersonInGroup\n currentMap = {}\n abstractTypes['PersonInGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00022'] = currentMap\n loadMaps['AFFI.PersonInGroup'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00022'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'personInGroups'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.general.Affiliation.PersonInGroup\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute PersonInGroup.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute PersonInGroup.deliveryAddress\n currentMap = {}\n contentMap['deliveryAddress'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00035'] = currentMap\n loadMaps['AFFI.PersonInGroup.deliveryAddress'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.deliveryAddress'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00035'\n currentMap['name'] = 'deliveryAddress'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute PersonInGroup.emailAddress\n currentMap = {}\n contentMap['emailAddress'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00036'] = currentMap\n loadMaps['AFFI.PersonInGroup.emailAddress'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.emailAddress'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00036'\n currentMap['name'] = 'emailAddress'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute PersonInGroup.endDate\n currentMap = {}\n contentMap['endDate'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00039'] = currentMap\n loadMaps['AFFI.PersonInGroup.endDate'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.endDate'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00039'\n currentMap['name'] = 'endDate'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00029')\n\n # Attribute PersonInGroup.faxNumber\n currentMap = {}\n contentMap['faxNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00038'] = currentMap\n loadMaps['AFFI.PersonInGroup.faxNumber'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.faxNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00038'\n currentMap['name'] = 'faxNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute PersonInGroup.mailingAddress\n currentMap = {}\n contentMap['mailingAddress'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00034'] = currentMap\n loadMaps['AFFI.PersonInGroup.mailingAddress'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.mailingAddress'\n currentMap['type'] = 'attr'\n 
currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00034'\n currentMap['name'] = 'mailingAddress'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute PersonInGroup.phoneNumbers\n currentMap = {}\n contentMap['phoneNumbers'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00037'] = currentMap\n loadMaps['AFFI.PersonInGroup.phoneNumbers'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.phoneNumbers'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00037'\n currentMap['name'] = 'phoneNumbers'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute PersonInGroup.position\n currentMap = {}\n contentMap['position'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00033'] = currentMap\n loadMaps['AFFI.PersonInGroup.position'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.position'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00033'\n currentMap['name'] = 'position'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute PersonInGroup.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00032'] = currentMap\n loadMaps['AFFI.PersonInGroup.serial'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00032'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role PersonInGroup.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role PersonInGroup.group\n currentMap = {}\n contentMap['group'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00040'] = currentMap\n loadMaps['AFFI.PersonInGroup.group'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.group'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:14_00040'\n currentMap['name'] = 'group'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['copyOverride'] = True\n\n # Role PersonInGroup.photo\n currentMap = {}\n contentMap['photo'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-06-13:30:20_00001'] = currentMap\n loadMaps['AFFI.PersonInGroup.photo'] = currentMap\n currentMap['tag'] = 'AFFI.PersonInGroup.photo'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-06-13:30:20_00001'\n currentMap['name'] = 'photo'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('DLOC').get('exolinks')\n # End of PersonInGroup\n\n currentMap = abstractTypes.get('PersonInGroup')\n aList = ['serial']\n currentMap['headerAttrs'] = aList\n aList = ['deliveryAddress', 'emailAddress', 'endDate', 'faxNumber', 'mailingAddress', 'phoneNumbers', 'position']\n currentMap['simpleAttrs'] = aList\n aList = ['group']\n currentMap['optLinks'] = aList\n aList = ['photo', 
'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AffiliationStore\n currentMap = {}\n exolinks['AffiliationStore'] = currentMap\n loadMaps['AFFI.exo-AffiliationStore'] = currentMap\n currentMap['tag'] = 'AFFI.exo-AffiliationStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00006'\n currentMap['name'] = 'AffiliationStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.general.Affiliation.AffiliationStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to Group\n currentMap = {}\n exolinks['Group'] = currentMap\n loadMaps['AFFI.exo-Group'] = currentMap\n currentMap['tag'] = 'AFFI.exo-Group'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00023'\n currentMap['name'] = 'Group'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.general.Affiliation.Group\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to Organisation\n currentMap = {}\n exolinks['Organisation'] = currentMap\n loadMaps['AFFI.exo-Organisation'] = currentMap\n currentMap['tag'] = 'AFFI.exo-Organisation'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00019'\n currentMap['name'] = 'Organisation'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.general.Affiliation.Organisation\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to Person\n currentMap = {}\n exolinks['Person'] = currentMap\n loadMaps['AFFI.exo-Person'] = currentMap\n currentMap['tag'] = 'AFFI.exo-Person'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00018'\n currentMap['name'] = 'Person'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.general.Affiliation.Person\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to PersonInGroup\n currentMap = {}\n exolinks['PersonInGroup'] = currentMap\n loadMaps['AFFI.exo-PersonInGroup'] = currentMap\n currentMap['tag'] = 'AFFI.exo-PersonInGroup'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00022'\n currentMap['name'] = 'PersonInGroup'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.general.Affiliation.PersonInGroup\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = 
globalMap.get('TEMP').get('abstractTypes')\n exolinks = globalMap.get('TEMP').get('exolinks')\n\n # Class AbstractProbability\n currentMap = {}\n abstractTypes['AbstractProbability'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:50_00005'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:50_00005'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.general.Template.AbstractProbability\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AbstractProbability.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AbstractProbability.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:38_00025'] = currentMap\n loadMaps['TEMP.AbstractProbability.weight'] = currentMap\n currentMap['tag'] = 'TEMP.AbstractProbability.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:38_00025'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Role AbstractProbability.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AbstractProbability\n\n currentMap = abstractTypes.get('AbstractProbability')\n aList = ['weight']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class MultiTypeValue\n currentMap = {}\n abstractTypes['MultiTypeValue'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-13-15:55:49_00003'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-13-15:55:49_00003'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.general.Template.MultiTypeValue\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute MultiTypeValue.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute MultiTypeValue.booleanValue\n currentMap = {}\n contentMap['booleanValue'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-13-15:55:55_00008'] = currentMap\n loadMaps['TEMP.MultiTypeValue.booleanValue'] = currentMap\n currentMap['tag'] = 'TEMP.MultiTypeValue.booleanValue'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-13-15:55:55_00008'\n currentMap['name'] = 'booleanValue'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute MultiTypeValue.floatValue\n currentMap = {}\n contentMap['floatValue'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-13-15:55:55_00007'] = currentMap\n loadMaps['TEMP.MultiTypeValue.floatValue'] = currentMap\n currentMap['tag'] = 'TEMP.MultiTypeValue.floatValue'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-13-15:55:55_00007'\n currentMap['name'] = 'floatValue'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute MultiTypeValue.intValue\n currentMap = {}\n contentMap['intValue'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-13-15:55:55_00006'] = currentMap\n loadMaps['TEMP.MultiTypeValue.intValue'] = currentMap\n 
currentMap['tag'] = 'TEMP.MultiTypeValue.intValue'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-13-15:55:55_00006'\n currentMap['name'] = 'intValue'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute MultiTypeValue.textValue\n currentMap = {}\n contentMap['textValue'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-11-13-15:55:55_00005'] = currentMap\n loadMaps['TEMP.MultiTypeValue.textValue'] = currentMap\n currentMap['tag'] = 'TEMP.MultiTypeValue.textValue'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-11-13-15:55:55_00005'\n currentMap['name'] = 'textValue'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Role MultiTypeValue.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of MultiTypeValue\n\n currentMap = abstractTypes.get('MultiTypeValue')\n aList = ['booleanValue', 'floatValue', 'intValue']\n currentMap['headerAttrs'] = aList\n aList = ['textValue']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList", "def _output_dict(self):\n lang = self.ddnGuiLanguage.get()\n\n fileout = os.path.normpath('{}/{}-{}.xml'.\\\n format(self.MapCreator, self.Source, self.ddnCurProject.get()))\n linesout = ['<?xml version=\"1.0\" encoding=\"UTF-8\"?>', \\\n '<DictionarySet xmlns:mc=\"urn:fmosoft-map-creator\" xmlns=\"urn:fmosoft-map-creator\" Version=\"1\">', \\\n ' <Dictionary SourceLanguage=\"{}\" SourceLanguageIsPredefined=\"true\" TargetLanguage=\"{}\" TargetLanguageIsPredefined=\"false\">'.\\\n format(self.Source, self.ddnCurProject.get()), \\\n ]\n for child in self.tree.get_children('approved'):\n vv = self.tree.item(child)['values']\n linesout.append(' <Translation Source=\"{}\" Target=\"{}\"/>'.format(vv[0], vv[1]))\n linesout.append(' </Dictionary>')\n linesout.append('</DictionarySet>')\n linesout.append('')\n\n if os.path.exists(fileout):\n os.remove(fileout)\n\n if fileout:\n output = codecs.open(fileout, mode='w', encoding='utf-8')\n output.write('\\n'.join(linesout))\n output.close()\n pass", "def generate_synonym_pairs(config: SettingConfig) -> dict:\n wn = rwn.RoWordNet()\n\n # Create the output dictionary that will be of type dict(str : set(pair(str, str)) where the key is\n # the PoS and the value is a set of pairs of words of PoS specified by the key\n pairs = dict()\n\n # Iterate over the selected parts of speech\n for part_of_speech in config.pos.values():\n\n pos_pairs = list()\n\n # Return all synsets corresponding to the PoS\n synset_ids = wn.synsets(pos=part_of_speech)\n\n # Iterate all the synsets for the current PoS\n for synset_id in synset_ids:\n # Get the synset object specified by synset_id\n synset = wn.synset(synset_id)\n\n # Get all the pairs, sort them by first word to keep set entries unique\n current_iteration_pairs = get_synset_pairs(synset)\n\n # Append all pairs from the current PoS to the global set\n pos_pairs.extend(current_iteration_pairs)\n\n # Get corresponding key in pos dictionary and add the pair to the resulting dictionary\n for key, value in config.pos.items():\n if value == part_of_speech:\n pairs[key] = unique(pos_pairs)\n\n return pairs", "def 
_convert_prondict(prondict):\n # TODO: Figure out a way to do this without removing stress information\n new_prondict = {}\n for word, pron in prondict.iteritems():\n # Syllabify pron\n try:\n syll_pron = [onset + nucleus + coda for onset, nucleus, coda in\n syllabify.syllabify(pron)]\n except ValueError:\n # Word could not be syllabified\n continue\n # Remove stress in each syllable\n new_sylls = [\"\".join(arpabet_arpaone([remove_stress(phoneme) for phoneme in syll]))\n for syll in syll_pron]\n new_pron = SYLL_MARKER + SYLL_MARKER.join(new_sylls) + SYLL_MARKER\n new_prondict[word] = new_pron\n\n return new_prondict", "def get_dict():\n # Open file and get contents\n try:\n open_file = open(os.getenv('HOME') + '/.pwman', 'r')\n except IOError:\n open(os.getenv('HOME') + '/.pwman', 'a').close()\n open_file = open(os.getenv('HOME') + '/.pwman', 'r')\n file_lines = open_file.readlines()\n open_file.close()\n # Create dictionary\n pw_dict = {}\n for pw_pair in [i.replace(\"\\n\", \"\").split(\":\") for i in file_lines]:\n pw_dict[pw_pair[0]] = [pw_pair[1], pw_pair[2]]\n # Return dictionary\n return pw_dict", "def create_dicts(self):\n \n # remove this string from filename to make output file names more manageable\n pre_output1 = self.file1.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n pre_output2 = self.file2.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n \n # Build the output file name.\n # if prefix is present add it\n if self.out_file_prefix is not None:\n # concatenate prefix, filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = self.out_file_prefix+pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n # if no prefix don't add it!\n else:\n # concatenate filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n\n # add temp to end of file name to create a temporary output filename\n self.tempoutputfilename = self.outputfilename.replace(\".txt\", '') + \"temp.txt\"\n\n # open temp output file\n self.tempoutputfile = open(self.outputfolder + self.tempoutputfilename, 'w')\n\n \n # open FE files\n file1_open = open(self.chosenfolder + self.file1, 'r')\n file2_open = open(self.chosenfolder + self.file2, 'r')\n\n # open file1 and create a dict of the features.\n for linenumber, line in enumerate(file1_open):\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file1_dict[int(splitline[1])] = line\n # get n of rows in file1 (take the linenumber of the last line)\n self.file1_len = linenumber\n\n # repeat for features in second file but first writing the feparam and stats to temp file - when pairing with control this ensures the \"header\" comes from the test (file2) not control (file1), NB NEITHER ARE ACCURATE!!!!\n for linenumber, line in enumerate(file2_open):\n if linenumber < 10:\n self.tempoutputfile.write(line)\n # then add all features to a dictionary, with the unique feature number as a key\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file2_dict[int(splitline[1])] = line\n # get n of rows in file2\n self.file2_len = linenumber\n\n # close files\n file1_open.close()\n file2_open.close()", "def read_pronunciation(pronunciation_file):\n # file = open('dictionary.txt', 'r')\n #\n # for line in file:\n # print line\n\n ################# 
https://m.reddit.com/r/CompSciPortfolio/comments/303fyo/assignment_3_poetry_reader/\n\n pronunciation_dictionary = {}\n line = pronunciation_file.readline()\n while line.startswith(';;;'):\n line = pronunciation_file.readline()\n while line != '':\n stripped_line = line.strip()\n separation = stripped_line.find(' ')\n pronunciation_dictionary[stripped_line[:separation]] = stripped_line[(separation + 2):].split()\n line = pronunciation_file.readline()\n return pronunciation_dictionary\n\n\n\n # my_list = {}\n # for line in pronunciation_file.readlines():\n # line = line.strip()\n # if line and \";;;\" not in line:\n # r = line.split()\n # word = r[0]\n # phonemes = r[1:]\n # my_list[word] = phonemes\n # return my_list", "def process_pathway_ontology(self) -> None:\n # Load pathway ontology from file\n pw = PathwayOntology(name=\"PW\",\n filename=self.pathway_ontology_file)\n pw.load_from_file()\n\n pw_dict = dict()\n\n for cl in pw.owl_classes:\n synonyms, annotations = pw.get_synonyms(cl)\n pw_dict[cl] = {\n 'name': pw.get_label(cl),\n 'aliases': pw.get_all_labels(cl) + synonyms,\n 'synonyms': annotations,\n 'definition': pw.get_definition(cl),\n 'subClassOf': pw.get_subClassOf(cl),\n 'part_of': pw.get_part_of(cl)\n }\n\n with open(self.pw_json_file, 'w') as outf:\n json.dump(pw_dict, outf, indent=4, sort_keys=True)", "def _create_yaml_map(self):", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CCLB').get('abstractTypes')\n exolinks = globalMap.get('CCLB').get('exolinks')\n\n # Class AtomLabel\n currentMap = {}\n abstractTypes['AtomLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'] = currentMap\n loadMaps['CCLB.AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'atomLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AtomLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AtomLabel.isotopeCode\n currentMap = {}\n contentMap['isotopeCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'] = currentMap\n loadMaps['CCLB.AtomLabel.isotopeCode'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.isotopeCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'\n currentMap['name'] = 'isotopeCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'] = currentMap\n loadMaps['CCLB.AtomLabel.name'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.subType\n currentMap = {}\n contentMap['subType'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'] = currentMap\n loadMaps['CCLB.AtomLabel.subType'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.subType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'\n currentMap['name'] = 'subType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute AtomLabel.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'] = currentMap\n loadMaps['CCLB.AtomLabel.weight'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role AtomLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AtomLabel\n\n currentMap = abstractTypes.get('AtomLabel')\n aList = ['isotopeCode', 'name', 'subType', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ChemCompLabel\n currentMap = {}\n abstractTypes['ChemCompLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'] = currentMap\n loadMaps['CCLB.ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemCompLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemCompLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemCompLabel.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'] = currentMap\n loadMaps['CCLB.ChemCompLabel.ccpCode'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ChemCompLabel.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'] = currentMap\n loadMaps['CCLB.ChemCompLabel.molType'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ChemCompLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemCompLabel.isotopomers\n currentMap = {}\n contentMap['isotopomers'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'] = currentMap\n loadMaps['CCLB.ChemCompLabel.isotopomers'] = 
currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.isotopomers'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'\n currentMap['name'] = 'isotopomers'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of ChemCompLabel\n\n currentMap = abstractTypes.get('ChemCompLabel')\n aList = ['ccpCode', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['isotopomers', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopomers']\n currentMap['children'] = aList\n\n # Class Isotopomer\n currentMap = {}\n abstractTypes['Isotopomer'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'] = currentMap\n loadMaps['CCLB.Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopomers'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotopomer.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotopomer.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'] = currentMap\n loadMaps['CCLB.Isotopomer.serial'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotopomer.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'] = currentMap\n loadMaps['CCLB.Isotopomer.weight'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role Isotopomer.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Isotopomer.atomLabels\n currentMap = {}\n contentMap['atomLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'] = currentMap\n loadMaps['CCLB.Isotopomer.atomLabels'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.atomLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'\n currentMap['name'] = 'atomLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of Isotopomer\n\n currentMap = abstractTypes.get('Isotopomer')\n aList = ['serial', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['atomLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['atomLabels']\n currentMap['children'] = aList\n\n # Class LabelingScheme\n currentMap = {}\n abstractTypes['LabelingScheme'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'] = currentMap\n loadMaps['CCLB.LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'labelingSchemes'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute LabelingScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute LabelingScheme.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'] = currentMap\n loadMaps['CCLB.LabelingScheme.details'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute LabelingScheme.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute LabelingScheme.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.longName\n currentMap = {}\n contentMap['longName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'] = currentMap\n loadMaps['CCLB.LabelingScheme.longName'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.longName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'\n currentMap['name'] = 'longName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute LabelingScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'] = currentMap\n loadMaps['CCLB.LabelingScheme.name'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role LabelingScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role LabelingScheme.chemCompLabels\n currentMap = {}\n contentMap['chemCompLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'] = currentMap\n loadMaps['CCLB.LabelingScheme.chemCompLabels'] = 
currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.chemCompLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'\n currentMap['name'] = 'chemCompLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of LabelingScheme\n\n currentMap = abstractTypes.get('LabelingScheme')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy', 'name']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'longName']\n currentMap['simpleAttrs'] = aList\n aList = ['chemCompLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemCompLabels']\n currentMap['children'] = aList\n\n # Out-of-package link to AtomLabel\n currentMap = {}\n exolinks['AtomLabel'] = currentMap\n loadMaps['CCLB.exo-AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-AtomLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['name'] = 'AtomLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ChemCompLabel\n currentMap = {}\n exolinks['ChemCompLabel'] = currentMap\n loadMaps['CCLB.exo-ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-ChemCompLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['name'] = 'ChemCompLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n\n # Out-of-package link to Isotopomer\n currentMap = {}\n exolinks['Isotopomer'] = currentMap\n loadMaps['CCLB.exo-Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.exo-Isotopomer'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['name'] = 'Isotopomer'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to LabelingScheme\n currentMap = {}\n exolinks['LabelingScheme'] = currentMap\n loadMaps['CCLB.exo-LabelingScheme'] = currentMap\n currentMap['tag'] = 
'CCLB.exo-LabelingScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['name'] = 'LabelingScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))", "def term_map_parse(self):\n try:\n if self.hpo_file == None:\n raise ValueError\n except ValueError:\n print('hpo.obo file required to create fabric trio case.')\n sys.exit(1)\n\n graph = obonet.read_obo(self.hpo_file)\n name_to_id = {data['name']: id_ for id_, data in graph.nodes(data=True) if 'name' in data}\n return name_to_id", "def _load_prond_from_file(fname: str) -> dict:\r\n \r\n prond = {}\r\n with open(fname, 'r', errors='ignore') as f:\r\n \r\n line = f.readline().strip()\r\n while line:\r\n \r\n if line[0].isalpha():\r\n word, pron = line.split(' ')\r\n \r\n # Ignore secondary pronunciations\r\n # Lowercase all\r\n if not word.endswith(')'):\r\n prond[word.lower()] = pron.split()\r\n\r\n line = f.readline().strip()\r\n \r\n return prond" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge two given graphs, namely a synsets graph and a SUMO graph. The final graph contains one type of node, namely synset nodes. Each synset node has an attribute named "synset".
def merge(self, g1, g2):
    logger = logging.getLogger(__name__)

    g = BaseGraph()
    g.copy_graph_from(g1)

    plwn2sumo_dict = defaultdict(set)
    plwn2sumo_dict = self.get_plwn2sumo_dict()

    synset_on_vertex_dict = {}
    for node in g.all_nodes():
        synset_id = node.synset.synset_id
        if synset_id in synset_on_vertex_dict:
            logger.warning("ID of some synset is not unique.")
            continue
        synset_on_vertex_dict[synset_id] = node

    num_of_edge = 0
    for edge in g2.all_edges():
        num_of_edge += 1
        logger.info("%d/%d", num_of_edge, g2.num_edges())

        parent_sumo_concept = edge.source().sumo
        child_sumo_concept = edge.target().sumo

        if parent_sumo_concept not in plwn2sumo_dict:
            logger.warning("The mapping file doesn't contain sumo concept '%s'.", parent_sumo_concept)
            continue
        if child_sumo_concept not in plwn2sumo_dict:
            logger.warning("The mapping file doesn't contain sumo concept '%s'.", child_sumo_concept)
            continue

        for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:
            if parent_syn_id not in synset_on_vertex_dict:
                logger.warning("The mapping file contains synset '%d' that is not in the graph.", parent_syn_id)
                continue
            p_node = synset_on_vertex_dict[parent_syn_id]
            for child_syn_id in plwn2sumo_dict[child_sumo_concept]:
                if child_syn_id not in synset_on_vertex_dict:
                    logger.warning("The mapping file contains synset '%d' that is not in the graph.", child_syn_id)
                    continue
                ch_node = synset_on_vertex_dict[child_syn_id]
                g.add_edge(p_node, ch_node, [("rel", edge.rel)], simply=True)

    return g
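To make the control flow above easier to follow, here is a minimal, self-contained sketch of the same lookup logic using plain dictionaries in place of BaseGraph nodes; all concept names, synset ids and node labels below are made-up illustrations, not data from the original resources.

from collections import defaultdict

# SUMO concept -> set of synset ids (the role of plwn2sumo_dict above).
plwn2sumo = defaultdict(set, {"Animal": {1, 2}, "Dog": {3}})
# synset id -> node of the synset graph (the role of synset_on_vertex_dict).
synset_on_vertex = {1: "node-1", 2: "node-2", 3: "node-3"}
# Edges of the SUMO graph, as (parent concept, child concept) pairs.
sumo_edges = [("Animal", "Dog")]

merged_edges = []
for parent_concept, child_concept in sumo_edges:
    for parent_id in plwn2sumo.get(parent_concept, set()):
        for child_id in plwn2sumo.get(child_concept, set()):
            if parent_id in synset_on_vertex and child_id in synset_on_vertex:
                merged_edges.append((synset_on_vertex[parent_id],
                                     synset_on_vertex[child_id]))

print(merged_edges)  # [('node-1', 'node-3'), ('node-2', 'node-3')] (order may vary)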
[ "def merge(def1, def2):\n graph1 = def1['graph']\n graph2 = def2['graph']\n\n # Add graph2 nodes and edges to graph1\n for node, data in graph1.nodes_iter(data=True):\n # Add graph2 edges\n for edge1, edge2 in graph2.edges_iter():\n graph1.add_edge(edge1, edge2)\n # Add graph2 nodes\n for node2, data2 in graph2.nodes_iter(data=True):\n graph1.add_node(node2, **data2)\n\n # Combine definition attributes\n def1['synsets'].extend(def2['synsets'])", "def merge(self, g1, g2):\n g = BaseGraph()\n g.init_graph(drctd = g1.is_directed())\n\n g.merge_graphs(g1, g2)\n\n lemma_on_vertex_dict = defaultdict(set)\n for node in g.all_nodes():\n if node.synset:\n lu_set = node.synset.lu_set\n for lu in lu_set:\n lemma_on_vertex_dict[lu.lemma + '/' + str(lu.pos)].add(node)\n\n for node in g.all_nodes():\n if node.msr:\n lemma_pos = node.msr\n if lemma_on_vertex_dict.has_key(lemma_pos):\n for synset_id_node in lemma_on_vertex_dict[lemma_pos]:\n g.add_edge(node, synset_id_node, [(\"rel\", 'syn-msr')], simply = True)\n if g.is_directed():\n g.add_edge(synset_id_node, node, [(\"rel\", 'syn-msr')], simply = True)\n else:\n logging.getLogger(__name__).warning(\"No synset in the graph contains lemma '%s'.\", lemma_pos)\n\n return g", "def merge(self,graphs):\n\t\tname=''\n\t\tauthor=''\n\t\tfor i in xrange(0,len(graphs)):\n\t\t\tif i==0:\n\t\t\t\tname=name+graph[i].name\n\t\t\t\tauthor=author+graph[i].author\n\t\t\tname=name+'+'+graph[i].name\n\t\t\tauthor=author+' and '+graph[i].author\n\t\tproto=graph(name,author)", "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def merge_graphs(g1, g2):\n\n if g1.DIRECTED or g2.DIRECTED:\n g = digraph()\n else:\n g = graph()\n\n for n in g1.nodes():\n g.add_node(n)\n for n in g2.nodes():\n if not n in g.nodes():\n g.add_node(n)\n for e in g1.edges():\n try:\n g.add_edge(e, g1.edge_weight(e))\n except:\n logging.info(\"merge_graphs: adding edge %d %d failed\" % (e[0], e[1]))\n for e in g2.edges():\n try:\n g.add_edge(e, g2.edge_weight(e))\n except:\n logging.info(\"merge_graphs: adding edge %d %d failed\" % (e[0], e[1]))\n return g", "def merge(self, other):\n self.add_nodes( other.nodeList )\n \n for node in other.nodeList:\n self.add_edges( other.edgesFrom(node) )", "def merge(self, other: \"GraphSet\") -> None:\n if other.name != self.name:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with name {other.name} into {self.name}\"\n )\n if other.version != self.version:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with version {other.version} into {self.version}\"\n )\n self.start_time = min(self.start_time, other.start_time)\n self.end_time = max(self.end_time, other.end_time)\n self.resources += other.resources\n self._resolve_duplicates()\n self.errors += other.errors\n self.stats.merge(other.stats)", "def graph_union(*args, **kwargs):\n\n if not len(args) > 1:\n raise AttributeError('At least two input Graphs required')\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(*args)\n\n all_share_common_origin = all([share_common_origin(args[0], n) for n in args[1:]])\n if all_share_common_origin and not kwargs.get('return_copy', False):\n\n nids = []\n for graph in args:\n nids.extend([n for n in graph.nodes if n not in nids])\n\n eids = []\n for graph in args:\n eids.extend([e for e in graph.edges if e not in eids])\n\n result = 
args[0].origin.getnodes(nids)\n result.edges.set_view(eids)\n return result\n else:\n\n # make a deep copy of the first graph\n result = args[0].copy(deep=True, copy_view=False)\n\n # we need control over the node ID to add\n # temporary turn off auto_nid if needed\n auto_nid = result.data.auto_nid\n result.data.auto_nid = False\n\n for graph in args[1:]:\n for node, attrib in graph.nodes.items():\n if node not in result.nodes:\n result.add_node(node, **attrib)\n\n for edge, attrib in graph.edges.items():\n if edge not in result.edges:\n result.add_edge(*edge, **attrib)\n\n # Restore auto_nid\n result.data.auto_nid = auto_nid\n\n return result", "def union(graphs):\n if len(graphs) == 0:\n return MolecularGraph()\n\n return nx.disjoint_union_all(graphs)", "def mix_graphs(source_graph1, source_graph2):\n g = clone_graph(source_graph1, identifier=source_graph1.identifier)\n g = clone_graph(source_graph2, target_graph=g)\n return g", "def union(node_a, node_b, nodes_set):\n\n a_root = find_set(node_a, nodes_set)\n b_root = find_set(node_b, nodes_set)\n link(a_root, b_root, nodes_set)", "def merge_synsets(definitions):\n # Map all synset having the same root.\n mapped = defaultdict(list)\n for definition in definitions:\n mapped[definition['graph'].graph['root']].append(definition)\n\n # For each list of synsets, merge them all into the first item of the list.\n merged = []\n for defs in mapped.itervalues():\n base = defs[0]\n for defn in defs[1:]:\n merge(base, defn)\n merged.append(base)\n return merged", "def merge_graphs(graphs, G=nx.Graph(), contig=None, coords=None):\n for graph in graphs:\n G = append_graph(G, graph, contig=contig, coords=coords)\n return G", "def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def concatenate_graphs(G1, G2):\n V = G1.V + 
G2.V\n edges = np.vstack((G1.edges, G1.V + G2.edges))\n weights = np.hstack((G1.weights, G2.weights))\n G = WeightedGraph(V, edges, weights)\n return G", "def merge(self, ASGgraph ):\r\n \r\n self.mergedASG.append(ASGgraph)\t\t\t\t\t# add the graph to the list of merged graphs\r\n for nodeType in ASGgraph.listNodes.keys():\r\n if not nodeType in self.listNodes.keys():\t\t\t# node type was not known\r\n self.listNodes[nodeType] = ASGgraph.listNodes[nodeType]\r\n self.nodeTypes.append(nodeType)\r\n else: \t# node type existed...\r\n for node in ASGgraph.listNodes[nodeType]:\t\t\t# add each node of merged graph to actual graph\r\n self.listNodes[nodeType].append(node)\r\n \r\n # copy also the model's attribute\r\n errors = []\r\n for attr in ASGgraph.generatedAttributes.keys():\r\n if attr in self.generatedAttributes.keys(): # Attribute is present!\r\n #print \"Attribute collision for \", attr, \"<-- New attribute value ignored\" \r\n errors.append(attr)\r\n if( not self.__collidedAttributeTracker.has_key( attr ) ):\r\n self.__collidedAttributeTracker[ attr ] = 1\r\n else:\r\n self.__collidedAttributeTracker[ attr ] += 1\r\n continue\r\n self.generatedAttributes[attr] = ASGgraph.generatedAttributes[attr]\r\n # now create the attribute!\r\n self.setAttrValue(attr, ASGgraph.getAttrValue(attr).clone())\r\n if( errors ):\r\n print 'Attribute name collisions occured during load (could affect '\\\r\n + 'old formalisms)\\nThe following attributes collided: '\\\r\n + str(errors) \r\n ## print 'In fact, these messages are slated for removal, as this ' \\\r\n ## 'attribute system is being bypassed to fix this problem'\r", "def merge_networks_in_series(n1, n2):\n new_l_size = n1.l_size + n2.l_size + 1 # One additional vertex in between.\n new_u_size = n1.u_size + n2.u_size\n\n # Connect the 0-pole and the inf-pole in the result network.\n new_link_edge = n1.zero_pole.insert_before()\n new_link_edge_opp = n2.inf_pole.insert_after()\n new_link_edge.opposite = new_link_edge_opp\n new_link_edge_opp.opposite = new_link_edge\n\n # Merge the 0-pole of n1 with the inf-pole of n2.\n n1.inf_pole.insert_all_after(n2.zero_pole)\n\n # Remove the link edges in n1 and n2 if they are not real.\n if not n1.is_linked:\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n if not n2.is_linked:\n n2.zero_pole.remove()\n n2.inf_pole.remove()\n\n # After a serial merge the poles are never linked.\n res = Network(new_link_edge, is_linked=False, l_size=new_l_size, u_size=new_u_size)\n res.type = 'S'\n return res\n\n # # Extract the poles from both networks.\n # first_net_zero_pole_edge = n1.zero_pole\n # first_net_inf_pole_edge = n1.inf_pole\n #\n # second_net_zero_pole_edge = n2.zero_pole\n # second_net_inf_pole_edge = n2.inf_pole\n #\n # # Create a new half edges for connecting the poles of the network. The\n # # edge will not be part from the edges list.\n # new_root_half_edge = first_net_zero_pole_edge.insert_after()\n # new_root_opposite = second_net_inf_pole_edge.insert_after()\n #\n # new_root_half_edge.opposite = new_root_opposite\n # new_root_opposite.opposite = new_root_half_edge\n #\n # # Get the half edges from both networks for merging\n # first_net_inf_pole_prior = first_net_inf_pole_edge.prior\n # second_net_zero_pole_edge_prior = second_net_zero_pole_edge.prior\n #\n # # Merge the both networks so that the inf-pole from the first network is\n # # identified with the zero-pole from the second one. 
Handling different\n # # while merging the two networks.\n # first_net_inf_pole_edge.prior = second_net_zero_pole_edge_prior\n # second_net_zero_pole_edge_prior.next = first_net_inf_pole_edge\n #\n # first_net_inf_pole_prior.next = second_net_zero_pole_edge\n # second_net_zero_pole_edge.prior = first_net_inf_pole_prior\n #\n # # Update the node numbers in the second network zero-pole edges\n # half_edge_walker = first_net_inf_pole_prior.next\n # while half_edge_walker != first_net_inf_pole_prior:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n #\n # # Check whether the original poles of the network that are merged are\n # # linked or not. If they are not linked then the corresponding half\n # # edges between them have to be removed.\n # if not n1.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # first_net_zero_pole_edge.remove()\n # first_net_inf_pole_edge.remove()\n #\n # if not n2.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # second_net_zero_pole_edge.remove()\n # second_net_inf_pole_edge.remove()\n #\n # # After a serial merge the poles are never linked.\n # res = Network(new_root_half_edge, is_linked=False,\n # l_size=new_l_size, u_size=new_u_size)\n # res.type = 'S'\n # return res", "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds and returns (in the form returned by decoderawtransaction) a transaction that spends the given utxo and pays CHI to some output
def build_tx (self, utxo, chiOut, name, nameAddr, value): nameData = self.nodes[0].name_show (name) inputs = [nameData, utxo] outputs = {nameAddr: Decimal ('0.01')} outputs.update (chiOut) tx = self.nodes[0].createrawtransaction (inputs, outputs) nameOp = { "op": "name_update", "name": name, "value": value, } tx = self.nodes[0].namerawtransaction (tx, 0, nameOp) res = self.nodes[0].decoderawtransaction (tx["hex"]) res["hex"] = tx["hex"] return res
[ "def sochain_utxo_to_xchain_utxo(utxo):\n hash = utxo['txid']\n index = utxo['output_no']\n \n value = round(float(utxo['value']) * 10 ** 8)\n script = bytearray.fromhex(utxo['script_hex']) #utxo['script_hex']\n witness_utxo = Witness_UTXO(value, script)\n return UTXO(hash, index, witness_utxo)", "def create_tx(self, coin, account, to, amount):\n if coin is ETH:\n gasEstimate = self.w3.eth.estimateGas(\n {\"from\": account.address, \"to\": to, \"value\": amount}\n )\n return {\n \"from\": account.address,\n \"to\": to,\n \"value\": self.w3.toWei(amount, 'ether'),\n \"gasPrice\": self.w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": self.w3.eth.getTransactionCount(account.address),\n }\n elif coin is BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(to, amount, BTC)])\n elif coin is BTC:\n return PrivateKey.prepare_transaction(account.address, [(to, amount, BTC)])\n else:\n return None", "def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)", "def build_transaction(self, kwargs):\n if 'source_asset' not in kwargs.keys():\n kwargs['source_asset'] = self.assets['xlm']\n\n base_fee = 100\n source_account = self.source_account\n destination = self.epic_gateway_addr\n destination_asset = kwargs['destination_asset']\n network_passphrase = Network.PUBLIC_NETWORK_PASSPHRASE\n send_amount = kwargs['send_amount']\n send_code = kwargs['send_code']\n set_timeout = kwargs['set_timeout']\n text_memo = kwargs['text_memo']\n\n path = self.get_send_paths(\n source_asset=kwargs['source_asset'],\n source_amount=kwargs['send_amount'],\n destination=self.epic_gateway_addr,\n )\n print(\"SUCCESS PATH: \", path)\n\n dest_min = path['destination_amount']\n\n print('')\n print(\"---------------------------------------------\")\n print(f\"TRADE {send_amount} {send_code} FOR {dest_min} {destination_asset.code}\")\n print(\"----------------------------------------------\")\n print('')\n\n transaction = TransactionBuilder(\n source_account=source_account,\n network_passphrase=network_passphrase,\n base_fee=base_fee) \\\n .append_path_payment_strict_send_op(\n send_code=send_code, send_issuer=None,\n dest_code=destination_asset.code,\n dest_issuer=destination_asset.issuer, send_amount=send_amount,\n dest_min=dest_min, destination=destination,\n path=path['path']) \\\n .set_timeout(set_timeout) \\\n .add_text_memo(text_memo) \\\n .build()\n print(f\"SUCCESS Swapic transaction build\")\n\n return transaction", "def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):\n if not type(inputs) is list:\n inputs = [inputs]\n\n tx = CTransaction()\n for i in inputs:\n tx.vin.append(CTxIn(COutPoint(i[\"txid\"], i[\"vout\"]), b\"\", 0xffffffff))\n for addr, amount in outputs.items():\n if addr == \"data\":\n tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))\n else:\n tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))\n tx.rehash()\n return hexlify(tx.serialize()).decode(\"utf-8\")", "def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):\n if not type(inputs) is list:\n inputs = [inputs]\n\n tx = CTransaction()\n for i in inputs:\n sigScript = i.get(\"sig\", b\"\")\n tx.vin.append(CTxIn(COutPoint(i[\"txid\"], i[\"vout\"]), sigScript, 0xffffffff))\n pairs = []\n if type(outputs) is dict:\n for addr, amount in outputs.items():\n pairs.append((addr,amount))\n else:\n pairs = outputs\n\n for addr, amount in pairs:\n if callable(addr):\n tx.vout.append(CTxOut(amount * BTC, addr()))\n elif 
type(addr) is list:\n tx.vout.append(CTxOut(amount * BTC, CScript(addr)))\n elif addr == \"data\":\n tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))\n else:\n tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))\n tx.rehash()\n return hexlify(tx.serialize()).decode(\"utf-8\")", "def use_contract(self):\n\n abi = None\n if os.path.exists(self.js_args['ARTIFACT_F']):\n with open(self.js_args['ARTIFACT_F']) as f:\n artifact_data = json.load(f)\n abi = artifact_data['abi']\n\n if abi is None or len(abi) == 0:\n self.print(\"Err: not able to get abi from the artifact, or abi is empty\", ERR)\n return\n\n self.print(\"We will now create the transaction which interacts with the contract.\")\n\n minerFee = self.parse_input(desc_line=\"set miner fee (in satoshis)\",\n default='1000',\n choices=None,\n i_msg=\"The miner fee is hardcoded to the transaction.\")\n\n func = self.parse_input(desc_line=\"Which function would you like to call?\",\n default=None,\n choices=[func_d[\"name\"] for func_d in abi],\n i_msg='Choose a function to call with the current transaction.')\n\n ### get function arguments\n\n self.print(\"Set the arguments to be passed to the chosen function.\\n\"\n \"** Note! type 'SIG' when the signature of the tx creator is required.\\n\"\n \" more information st '-i'\")\n args_i_msg = \"Set the arguments to be passed to the chosen function: {}\\n\" \\\n \"** Note! if the signature of the tx creator is required (mostly, this argument\\n\" \\\n \"is named 'sig s' or something similar), then it cannot be explicitly provided\\n\" \\\n \"by you since it has to sign the entire transaction, which is not ready yet.. \\n\" \\\n \"so in this case, just type in 'SIG'\".format(func)\n funcInputs = None\n for func_d in abi:\n if func_d[\"name\"] == func:\n funcInputs = func_d[\"inputs\"]\n\n funcArgs = []\n for item in funcInputs:\n v = self.parse_input(desc_line=\"{} {} = \".format(item[\"type\"], item[\"name\"]),\n default=None,\n choices=None,\n i_msg=args_i_msg)\n if v == 'SIG':\n v = \"new SignatureTemplate(walletInfo.childKeyPair)\"\n funcArgs.append(v)\n\n def short_parse(a):\n return \"{}\".format(a) if a.isdigit() else \"'{}'\".format(a)\n funcArgs_str = ', '.join([short_parse(arg) for arg in funcArgs])\n\n ### tx output\n outputs = []\n amounts = []\n self.print(\"Type in the outputs of the transaction. Note that order matters.\")\n output_i_msg = \"Define the output(s) of the tx. 
Both the cashAdresses and amounts (satoshis).\\n\" \\\n \"If the function being called has a restriction over its recipients, these must\\n\" \\\n \"be given here in the exact same order as mentioned in the contract source code.\"\n output_count = 0\n while 'y' == self._y_n_question('would you like to add a new output?', i_msg=output_i_msg):\n out = self.parse_input(desc_line=\"output_{} (cashAdress)= \".format(output_count),\n default=None,\n choices=None,\n i_msg=output_i_msg)\n amount = self.parse_input(desc_line=\"amount_{} (satoshis)= \".format(output_count),\n default=None,\n choices=None,\n i_msg=output_i_msg)\n outputs.append(out)\n amounts.append(amount)\n output_count += 1\n to_str = ''.join([\".to('{}', {})\".format(t[0], t[1]) for t in zip(outputs,amounts)])\n\n self.js_args['TX_FUNC'] = \"await con.functions.{}({}){}.withHardcodedFee({});\".format(func,\n funcArgs_str,\n to_str,\n minerFee)\n\n self.js_args['MAIN'] = \"use_contract();\"\n self.js_run()", "def send_tx(args):\n kwargs = {\n '--privkey': args.privkey,\n '--to': AMEND_ADDR,\n '--code': args.code,\n '--value': str(args.value),\n }\n args = functools.reduce(\n lambda lst, kv: lst + list(kv),\n kwargs.items(),\n [],\n )\n print(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'send_tx.py'])\n with open('../output/transaction/hash') as fobj:\n return fobj.read().strip()", "def _generate_coinbase_transaction(self, height):\n coinbase_transaction = tx.generate_coinbase_transaction(\n height, COINBASE_ADDRESS)\n coinbase_transaction.generate_tx_hash()\n # Validate and add coinbase transaction at index 0 of\n # waiting_transactions\n if(tx.validate_coinbase_transaction(coinbase_transaction, height)):\n self.waiting_transactions.insert(0, coinbase_transaction)", "def spend(tx, idx, utxos, **kwargs):\n\n ctx = {**DEFAULT_CONTEXT, \"tx\":tx, \"idx\":idx, \"utxos\":utxos, **kwargs}\n\n def to_script(elem):\n \"\"\"If fed a CScript, return it; if fed bytes, return a CScript that pushes it.\"\"\"\n if isinstance(elem, CScript):\n return elem\n else:\n return CScript([elem])\n\n scriptsig_list = flatten(get(ctx, \"scriptsig\"))\n scriptsig = CScript(b\"\".join(bytes(to_script(elem)) for elem in scriptsig_list))\n witness_stack = flatten(get(ctx, \"witness\"))\n return (scriptsig, witness_stack)", "def create_transaction_output(address, output_value):\n bitcoin_address = CBitcoinAddress(address)\n tx_out = CMutableTxOut(output_value, bitcoin_address.to_scriptPubKey())\n return tx_out", "def transaction(self, commands, quorum=1,\n creator_account=None, created_time=None):\n assert creator_account or self.creator_account, \\\n \"No account name specified as transaction creator id\"\n if not created_time:\n created_time = self.now()\n if not creator_account:\n creator_account = self.creator_account\n tx = transaction_pb2.Transaction()\n core_payload = tx.payload.reduced_payload\n # setting transaction contents\n core_payload.quorum = quorum\n core_payload.created_time = created_time\n core_payload.creator_account_id = creator_account\n core_payload.commands.extend(commands)\n return tx", "def generate_token_ota_transaction(self):\n\n # generate token transaction\n self.generate_token_transaction()\n\n # generate ota receiving wallet\n self.ota_receiving_wallet = CreateKeystore()\n self.ota_receiving_wallet.create_wallet()\n\n # time.sleep(5)\n\n child = pexpect.spawn('node tokenOTAtransaction', cwd='../src/')\n if 
commonUtil.show_logs:\n child.logfile = sys.stdout\n\n commonUtil.check_expect(\"Input file name\", child, test_name,\n \"'Input file name' prompt not found\")\n child.sendline(self.get_ota_wallet().get_file_name())\n\n commonUtil.check_expect(\"Input password:\", child, test_name,\n \"'Input password:' prompt not found\")\n child.sendline(self.get_ota_wallet().get_password())\n\n commonUtil.check_expect(\"Input: \", child, test_name,\n \"'Input:' prompt for ota address not found\")\n child.sendline(self.get_token_ota_address())\n\n commonUtil.check_expect(\"Input: \", child, test_name,\n \"'Input:' prompt for receiving wan address not found\")\n child.sendline(self.ota_receiving_wallet.get_wan_address())\n\n commonUtil.check_expect(\"Input: \", child, test_name,\n \"'Input:' prompt for stamp address not found\")\n\n child.sendline(\"0x\" + self.get_ota_wallet().get_stamp_address())\n\n commonUtil.check_expect(\"Token balance of)[\\s\\S]*(\" + commonUtil.default_wan_transfer_amount, child, test_name,\n \"Token ota balance message not displayed as expected\")\n\n child.expect(pexpect.EOF)", "def mk_simple_transaction(self, from_addr, to_addr, send_value):\n transaction = dict(\n nonce=self.web3.eth.get_transaction_count(from_addr),\n gasPrice=self.web3.eth.gasPrice,\n # there must be an automated way to automatically set the gas price\n # based off of the gas strategy\n gas=100000,\n to=to_addr,\n value=self.web3.toWei(send_value, 'wei')\n )\n return transaction", "def transfer_ownership(self, to):\n contract_payload = {\"symbol\": self.symbol.upper(), \"to\": to}\n json_data = {\"contractName\":\"nft\",\"contractAction\":\"transferOwnership\",\n \"contractPayload\":contract_payload}\n assert self.blockchain.is_hive\n tx = self.blockchain.custom_json(self.ssc_id, json_data, required_auths=[self[\"issuer\"]])\n return tx", "def MakeUnsignedCombineTx(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ejecutar_agregar_transaccion(blockchain: list) -> None:\n codigo = input(\"Ingrese el código de la transacción: \")\n remitente = input(\"Ingrese la dirección de la cuenta que envía la transacción: \")\n operacion = input(\"Ingrese el tipo de operación (transferencia/contrato): \")\n destinatario = \"\"\n if operacion == \"transferencia\":\n destinatario = input(\"Ingrese la dirección de la cuenta que recibe la transacción: \")\n valor = float(input(\"Ingrese el valor a transferir en la transacción: \"))\n\n # TODO: complete el codigo haciendo el llamado a la funcion del modulo que\n # implementa este requerimiento", "def from_transaction(transaction, output_index):\n obj = transaction['outputs'][output_index]\n transaction_json = json.dumps(transaction, sort_keys=True, separators=(',', ':'))\n transaction_digest = hashlib.sha256(transaction_json).hexdigest()\n object_digest = hashlib.sha256(transaction['outputs'][output_index]).hexdigest()\n object_id = '{}|{}|{}'.format(transaction_digest, object_digest, output_index)\n object_id = hashlib.sha256(object_id).hexdigest()\n return ChainspaceObject(object_id, obj)", "def transaction(sender, receiver, amount, gas, timestamp):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Signs a transaction (in the format of build_tx) with the given node, and returns the decoderawtransaction-type result again.
def sign (self, node, tx): signed = node.signrawtransactionwithwallet (tx["hex"]) res = node.decoderawtransaction (signed["hex"]) res.update (signed) return res
[ "def sign_transaction(self, transaction, prvkey):\n return self.web3.eth.account.sign_transaction(transaction, prvkey)", "def sign_transaction(transaction, priv_key):\n serialized_tx_msg = serialize_transaction(tx=transaction, signed=False)\n return sign_msg(serialized_tx_msg, priv_key)", "def sign_transaction(self, transaction):\n try:\n address = transaction.from_address\n private_key = self.addresses[address]['private_key']\n transaction.sign_transaction(private_key)\n except Exception as ex:\n print(\"Error signing transaction from address: \" + address + \" \" + str(ex))", "def test_sign_transaction(self):\n tx_msg = SigningMessage(\n performative=SigningMessage.Performative.SIGN_TRANSACTION,\n skill_callback_ids=self.skill_callback_ids,\n skill_callback_info=self.skill_callback_info,\n terms=self.terms,\n raw_transaction=RawTransaction(self.ledger_id, \"transaction\"),\n )\n assert tx_msg._is_consistent()\n encoded_tx_msg = tx_msg.encode()\n decoded_tx_msg = tx_msg.serializer.decode(encoded_tx_msg)\n assert tx_msg == decoded_tx_msg", "def submit_signed_tx(self, tx: Transaction):\n # encode transaction and append signatures\n encoded_tx = transaction.encode_transaction(tx)\n\n # submit and return digest\n return self._post_tx_json(encoded_tx, None)", "def online_sign(self, transaction: dict):\n\n if 'signature' in transaction:\n raise TronError('Transaction is already signed')\n\n address = self.tron.address.from_private_key(self.tron.private_key).hex.lower()\n owner_address = transaction['raw_data']['contract'][0]['parameter']['value']['owner_address']\n\n if address != owner_address:\n raise ValueError('Private key does not match address in transaction')\n\n return self.tron.manager.request('/wallet/gettransactionsign', {\n 'transaction': transaction,\n 'privateKey': self.tron.private_key\n })", "def sign_tx(self, psbt: PSBT) -> PSBT:\n raise NotImplementedError(\"The HardwareWalletClient base class \"\n \"does not implement this method\")", "def sign_and_send(transaction: str, passphrase: str, client: algod.AlgodClient) -> Dict[str, str or int]:\n private_key = mnemonic.to_private_key(passphrase)\n signed_transaction = transaction.sign(private_key)\n transaction_id = signed_transaction.transaction.get_txid()\n client.send_transaction(signed_transaction, headers={'content-type': 'application/x-binary'})\n transaction_info = wait_for_confirmation(client, transaction_id)\n return transaction_info", "def serialize_transaction(tx, signed=True):\n tx_out = tx[\"outputs\"]\n tx_ins = tx[\"inputs\"]\n tx_scripts = tx[\"scripts\"]\n\n out = \"\"\n out += cutils.num_to_hex_string(tx[\"type\"])\n\n out += cutils.num_to_hex_string(tx[\"version\"])\n\n out += (serialize_exclusive(tx[\"type\"]))(tx)\n\n out += cutils.num_to_var_int(len(tx[\"attributes\"]))\n\n for attribute in tx[\"attributes\"]:\n attr = TransactionAttribute(**attribute)\n out += serialize_transaction_attribute(attr)\n\n out += cutils.num_to_var_int(len(tx_ins))\n\n for tx_in in tx_ins:\n inp = TransactionInput(**tx_in)\n out += serialize_transaction_input(inp)\n\n out += cutils.num_to_var_int(len(tx_out))\n\n for output in tx_out:\n outp = TransactionOutput(**output)\n out += serialize_transaction_output(outp)\n\n if signed and tx_scripts and (len(tx_scripts) > 0):\n out += cutils.num_to_var_int(len(tx_scripts))\n for script in tx_scripts:\n witness = Witness(**script)\n out += serialize_witness(witness)\n\n logger.debug(\"Final serialized transaction message to sign {0}\".format(out))\n return out.strip()", "def _signTx(tx, 
secret=None, secondSecret=None):\n\tif not secret:\n\t\ttry:\n\t\t\treturn tx.sign()\n\t\texcept core.NoSecretDefinedError:\n\t\t\treturn tx\n\telse:\n\t\treturn tx.sign(secret, secondSecret)", "def sign(self, verify=True):\n # exception if we are not the forger\n raw = self.to_raw()\n if self.verbose:\n print(raw)\n self.pubkey = poscrypto.PUB_KEY.to_string()\n self.txid = poscrypto.sign(raw, verify=verify)", "def serialize_transaction( filled_transaction ):\n # although this will always be present for this module\n # keep this check anyway\n if \"v\" in filled_transaction:\n # https://github.com/harmony-one/harmony/blob/f8879f5e0288157bf95ae2898a9a27f0c85ff9ad/core/types/transaction_signing.go#L173\n if \"shardID\" in filled_transaction and filled_transaction[\n \"v\" ] < 1666600000:\n serializer = SignedHarmonyTxData\n else:\n serializer = SignedEthereumTxData\n else:\n if \"shardID\" in filled_transaction:\n serializer = UnsignedHarmonyTxData\n else:\n serializer = UnsignedEthereumTxData\n for field, _ in serializer._meta.fields:\n assert field in filled_transaction, f\"Could not find {field} in transaction\"\n return serializer.from_dict(\n {\n field: filled_transaction[ field ]\n for field,\n _ in serializer._meta.fields\n }\n )", "def sign_tx(self, tx):\n if self.privkey:\n log.info('signing tx', tx=tx, account=self)\n tx.sign(self.privkey)\n else:\n raise ValueError('Locked account cannot sign tx')", "def sign_transaction( transaction_dict, private_key ) -> SignedTransaction:\n account, sanitized_transaction = sanitize_transaction(transaction_dict, private_key)\n if \"to\" in sanitized_transaction and sanitized_transaction[ \"to\"\n ] is not None:\n sanitized_transaction[ \"to\" ] = convert_one_to_hex(\n sanitized_transaction[ \"to\" ]\n )\n # https://github.com/ethereum/eth-account/blob/00e7b10005c5fa7090086fcef37a76296c524e17/eth_account/_utils/transactions.py#L39\n filled_transaction = pipe(\n sanitized_transaction,\n dict,\n partial( merge,\n TRANSACTION_DEFAULTS ),\n chain_id_to_v,\n apply_formatters_to_dict( HARMONY_FORMATTERS ),\n )\n unsigned_transaction = serialize_transaction( filled_transaction )\n transaction_hash = unsigned_transaction.hash()\n\n # https://github.com/ethereum/eth-account/blob/00e7b10005c5fa7090086fcef37a76296c524e17/eth_account/_utils/signing.py#L26\n if isinstance(\n unsigned_transaction,\n ( UnsignedEthereumTxData,\n UnsignedHarmonyTxData )\n ):\n chain_id = None\n else:\n chain_id = unsigned_transaction.v\n (\n v, # pylint: disable=invalid-name\n r, # pylint: disable=invalid-name\n s, # pylint: disable=invalid-name\n ) = sign_transaction_hash(\n account._key_obj, transaction_hash, chain_id\n )\n encoded_transaction = encode_transaction(\n unsigned_transaction,\n vrs = ( v,\n r,\n s )\n )\n signed_transaction_hash = keccak( encoded_transaction )\n return SignedTransaction(\n rawTransaction = HexBytes( encoded_transaction ),\n hash = HexBytes( signed_transaction_hash ),\n r = r,\n s = s,\n v = v,\n )", "def encode_transaction( unsigned_transaction, vrs ):\n ( v, r, s ) = vrs # pylint: disable=invalid-name\n chain_naive_transaction = dissoc(\n unsigned_transaction.as_dict(),\n \"v\",\n \"r\",\n \"s\"\n )\n if isinstance(\n unsigned_transaction,\n ( UnsignedHarmonyTxData,\n SignedHarmonyTxData )\n ):\n serializer = SignedHarmonyTxData\n else:\n serializer = SignedEthereumTxData\n signed_transaction = serializer(\n v = v,\n r = r,\n s = s,\n **chain_naive_transaction\n )\n return rlp.encode( signed_transaction )", "def sign_tx(self, tx: 
payloads.Transaction, password: str, magic: Optional[int] = None) -> None:\n if magic is None:\n magic = settings.network.magic\n\n self._validate_tx(tx)\n\n message = magic.to_bytes(4, byteorder=\"little\", signed=False) + tx.hash().to_array()\n signature = self.sign(message, password)\n\n invocation_script = vm.ScriptBuilder().emit_push(signature).to_array()\n # mypy can't infer that the is_watchonly check ensures public_key has a value\n verification_script = contracts.Contract.create_signature_redeemscript(self.public_key) # type: ignore\n tx.witnesses.insert(0, payloads.Witness(invocation_script, verification_script))", "def _get_signed_transaction(\n self,\n transaction_signing_request: TransactionSigningRequest\n ) -> Union[SignedTransaction, TransactionRejected]:\n assert isinstance(transaction_signing_request, TransactionSigningRequest)\n\n try:\n transaction = Transaction(\n nonce = transaction_signing_request.nonce,\n gasprice = transaction_signing_request.gasprice,\n startgas = transaction_signing_request.startgas,\n to = transaction_signing_request.to,\n value = transaction_signing_request.value,\n data = transaction_signing_request.data,\n )\n except (InvalidTransaction, TypeError):\n # Is it possible to load the transaction using the library we're using to sign it?\n # If not, rejection reason is InvalidTransaction\n return TransactionRejected(\n reason=TransactionRejected.REASON.InvalidTransaction,\n nonce=transaction_signing_request.nonce,\n )\n\n # If transaction is correct, sign it.\n try:\n transaction.sign(self.ethereum_private_key)\n except (InvalidTransaction, TypeError):\n # Does the transaction execute a function from the contract that the service has the private key for?\n # If not, rejection reason is UnauthorizedAccount.\n return TransactionRejected(\n reason=TransactionRejected.REASON.UnauthorizedAccount,\n nonce=transaction_signing_request.nonce,\n )\n\n assert transaction.v is not None\n assert transaction.r is not None\n assert transaction.s is not None\n\n # Respond with SignedTransaction.\n return SignedTransaction(\n nonce = transaction_signing_request.nonce,\n gasprice = transaction_signing_request.gasprice,\n startgas = transaction_signing_request.startgas,\n to = transaction_signing_request.to,\n value = transaction_signing_request.value,\n data = transaction_signing_request.data,\n v = transaction.v,\n r = transaction.r,\n s = transaction.s,\n )", "def sign_transaction(self, private_key):\n\n to_be_hashed = (str(self.timestamp) +\n str(self.sender_address) +\n str(self.recipient_address) +\n str(self.amount) +\n # str(self.transaction_inputs) +\n # str(self.transaction_outputs) +\n str(self.transaction_id))\n\n # Create a hash value of the whole message\n sha_hash = SHA256.new(to_be_hashed.encode())\n\n # Import private key\n key = RSA.importKey(private_key)\n\n # print(sha_hash)\n\n # Construct an instance of the crypto object\n cipher = PKCS1_v1_5.new(key)\n\n # Create and return the signature\n self.transaction_signature = cipher.sign(sha_hash)", "def submit_transaction():\n data = request.get_json()\n\n # Create candidate transaction object\n try:\n tx = Transaction.from_dict(data['transaction'])\n except (KeyError, TypeError):\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n statuses = []\n # Broadcast if needed and turn off broadcasting for other nodes\n if request.args.get('broadcast', type=int, default=0):\n for node_ in node.network:\n if not node_['id'] == 
node.node_id:\n response = requests.post(\n node_['ip'] + '/transactions/submit?broadcast=0',\n json=dict(\n transaction=data['transaction'],\n signature=data['signature']\n )\n )\n statuses.append(response.status_code)\n\n if not response.status_code == 200:\n response = dict(message='Transaction rejected by the network.')\n return jsonify(response), 202\n\n # Validate transaction as-is\n val_result = validate_transaction_document(tx)\n if isinstance(val_result, str):\n response = dict(message=val_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Verify signature\n # defined in backend/utils\n sign_result = verify_signature(tx, data['signature'])\n if isinstance(sign_result, str):\n response = dict(message=sign_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Add transaction to local blockchain\n node.blkchain.add_transaction(tx)\n myurl = node.network[node.node_id]['ip']\n url = myurl + '/blockchain/mine_block'\n mine_resp = requests.get(url=url)\n if mine_resp.status_code == 200:\n block_dict = mine_resp.json()\n add_resp = requests.post(url=myurl + '/blockchain/add_block?\\\n broadcast=1', json=block_dict)\n # run consensus \n requests.get(url=myurl+'/blockchain/consensus')\n\n response = dict(message='Transaction added.')\n\n return jsonify(response), 200" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pick section of signal
def pick_section(signal, section=None): len_noise = signal.shape[-1] if section is None: len_sig = len_noise ii = 0 elif isinstance(section, int): len_sig = section ii = np.random.randint(0, len_noise - len_sig) else: len_sig = np.asarray(section).shape[-1] ii = np.random.randint(0, len_noise - len_sig) return signal[..., ii:ii + len_sig]
[ "def selection_fn(self, trace, points, selector):\n self.segment = self.fig.layout[\"sliders\"][0].active\n seg = self.segment\n\n xrange = selector.xrange\n wave = self.wave[seg]\n mask = self.mask[seg]\n\n # Choose pixels and value depending on selected type\n if self.mask_type == \"good\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 0)\n elif self.mask_type == \"bad\":\n value = 0\n idx = (wave > xrange[0]) & (wave < xrange[1])\n elif self.mask_type == \"line\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask != 0)\n print(np.count_nonzero(idx))\n elif self.mask_type == \"cont\":\n value = 2\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 1)\n else:\n return\n\n # Apply changes if any\n if np.count_nonzero(idx) != 0:\n self.mask[seg][idx] = value\n\n with self.fig.batch_update():\n # Update Line Mask\n m = self.line_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 1\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y\n\n # Update Cont Mask\n m = self.cont_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 2\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y", "def onpick(self, event):\n\n if isinstance(event.artist, pylab.Line2D) or \\\n isinstance(event.artist, matplotlib.collections.RegularPolyCollection):\n\n for subplotnr, handles in enumerate(self.phs):\n if event.artist in handles:\n col = handles.index(event.artist)\n break\n else:\n plotnr = None\n return\n \n line = event.artist\n ind = event.ind\n mm = (~self.datasy[subplotnr].mask[:, col]).nonzero()[0][ind]\n\n row = self.dataid[mm]\n self.subject.GetView().SelectRow(row, \n (event.mouseevent.key == 'shift'))\n self.subject.GetView().MakeCellVisible(row, 0)\n \n elif isinstance(event.artist, matplotlib.axis.XAxis):\n print \"you were hitting the xaxis\"\n\n elif event.artist is self.axs[-1].get_xaxis().get_label():\n print \"you were hitting the x label\"\n print event.artist", "def segment(self, time_series_sample):\n pass", "def _select_nearby_events(self):\n\n if self._ra_low < 0.:\n selected = np.nonzero((\n (self._decs >= self._dec_low)\n & (self._decs <= self._dec_high)\n & (((self._ras >= 0.) & (self._ras <= self._ra_high))\n # include all events that are close to the source\n # from the `other side of 2pi´ someone call a mathematician, how do you properly say that?\n # 2pi ambiguity?\n | ((self._ras >= self._ra_low + 2 * np.pi) & (self._ras <= 2 * np.pi)))\n ))\n elif self._ra_high > 2 * np.pi:\n selected = np.nonzero((\n (self._decs >= self._dec_low)\n & (self._decs <= self._dec_high)\n & (((self._ras <= 2 * np.pi) & (self._ras >= self._ra_low))\n | ((self._ras >= 0.) 
& (self._ras <= self._ra_high - 2 * np.pi)))\n ))\n else:\n selected = np.nonzero((\n (self._decs >= self._dec_low)\n & (self._decs <= self._dec_high)\n & (self._ras >= self._ra_low)\n & (self._ras <= self._ra_high))\n )\n selected_dec_band = np.nonzero((\n (self._decs >= self._dec_low) & (self._decs <= self._dec_high))\n )\n\n self._selected = selected\n\n self._selected_ras = self._ras[selected]\n\n self._selected_decs = self._decs[selected]\n\n self._selected_energies = self._energies[selected]\n\n self._selected_bg_energies = self._energies#[selected_dec_band]\n\n self._selected_bg_ras = self._ras#[selected_dec_band]\n\n self._selected_bg_decs = self._decs#[selected_dec_band]\n \n\n if isinstance(self._ang_errs, np.ndarray):\n self._selected_ang_errs = self._ang_errs[selected]\n else:\n self._selected_ang_errs = [1] * len(selected[0])\n\n self.Nprime = len(selected[0])\n\n self.N = self._energies.size #len(selected_dec_band[0])\n\n if isinstance(self._direction_likelihood, EventDependentSpatialGaussianLikelihood):\n self._signal_llh_spatial = self._direction_likelihood(\n self._selected_ang_errs,\n self._selected_ras,\n self._selected_decs, \n self._source_coord\n )", "def slicewhere(condition):\n regions = ndimage.find_objects(ndimage.label(condition)[0])\n return [region[0] for region in regions]", "def get_sample_mask(self):", "def select_section(view, mn_consts, section_x, section_y):\n view.selectAt(section_x, section_y, mn_consts.infoSetSelection)", "def picker(line, Mevent):\n if Mevent.xdata is None:\n return False, dict()\n x0 = line.get_xdata()\n y0 = line.get_ydata()\n d = np.sqrt((x0 - Mevent.xdata)**2. + (y0 - Mevent.ydata)**2.)\n #ind = np.nonzero(np.less_equal(d, maxd))\n ind = (np.argmin(d),)\n if len(ind):\n pickx = np.take(x0, ind)\n picky = np.take(y0, ind)\n props = dict(ind=ind, pickx=pickx, picky=picky)\n return True, props\n else:\n return False, dict()", "def select_spline(polydata):\r\n \r\n mapper = vtk.vtkPolyDataMapper()\r\n mapper.SetInputConnection(polydata.GetOutputPort())\r\n mapper.ScalarVisibilityOff()\r\n \r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n actor.GetProperty().SetInterpolationToFlat()\r\n \r\n renderer = vtk.vtkRenderer()\r\n renderWindow = vtk.vtkRenderWindow()\r\n renderWindow.AddRenderer(renderer)\r\n interactor = vtk.vtkRenderWindowInteractor()\r\n interactor.SetRenderWindow(renderWindow)\r\n \r\n renderer.AddActor(actor)\r\n renderer.SetBackground(.3, .4, .5)\r\n \r\n isolines = vtk.vtkContourFilter()\r\n isolines.SetInputConnection(polydata.GetOutputPort())\r\n isolines.SetValue(0,0.7)\r\n isolines.SetValue(1,-0.7)\r\n isolines.Update()\r\n \r\n stripper = vtk.vtkStripper()\r\n stripper.SetInputConnection(isolines.GetOutputPort())\r\n stripper.Update()\r\n\r\n def order_points(stripper, keep_ratio=1):\r\n sample_rate = int(1/keep_ratio)\r\n lines = stripper.GetOutput().GetLines()\r\n points = stripper.GetOutput().GetPoints()\r\n NumberOfLines = stripper.GetOutput().GetNumberOfLines()\r\n pts = vtk.vtkIdList()\r\n lines.InitTraversal()\r\n list_poly = []\r\n \r\n for i in range(NumberOfLines):\r\n lines.GetNextCell(pts)\r\n \r\n nb_pts = pts.GetNumberOfIds()\r\n points_ord = vtk.vtkPoints()\r\n line_ord = vtk.vtkCellArray()\r\n line_ord.InsertNextCell(pts)\r\n \r\n for j in range(nb_pts):\r\n if j%sample_rate==0:\r\n points_ord.InsertPoint(j//sample_rate, points.GetPoint(pts.GetId(j)))\r\n \r\n poly_ordered = vtk.vtkPolyData()\r\n poly_ordered.SetPoints(points_ord)\r\n poly_ordered.SetLines(line_ord)\r\n 
list_poly.append(poly_ordered)\r\n return list_poly[::-1]\r\n\r\n polys_ordered = order_points(stripper, keep_ratio=0.2)\r\n polys_ordered = [poly for poly in polys_ordered if poly.GetNumberOfPoints() > 10]\r\n representations = []\r\n contours = []\r\n \r\n contourWidget = vtk.vtkContourWidget()\r\n contours.append(contourWidget)\r\n contourWidget.SetInteractor(interactor)\r\n #rep = vtk.vtkOrientedGlyphContourRepresentation(contourWidget.GetRepresentation())\r\n rep = contourWidget.GetRepresentation()\r\n representations.append(rep)\r\n rep.GetLinesProperty().SetColor(1, 0.2, 0);\r\n rep.GetLinesProperty().SetLineWidth(3.0);\r\n \r\n pointPlacer = vtk.vtkPolygonalSurfacePointPlacer()\r\n pointPlacer.AddProp(actor)\r\n pointPlacer.SnapToClosestPointOn()\r\n pointPlacer.GetPolys().AddItem( polydata.GetOutput() )\r\n rep.SetPointPlacer(pointPlacer)\r\n \r\n #interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator()\r\n #interpolator.GetPolys().AddItem(polydata.GetOutput())\r\n interpolator = vtk.vtkBezierContourLineInterpolator()\r\n rep.SetLineInterpolator(interpolator)\r\n\r\n contourWidget.EnabledOn()\r\n \r\n splines = []\r\n \r\n i = [0]\r\n def keyPressEvent(obj, event):\r\n \r\n key = obj.GetKeySym()\r\n if key == 'Return' and i[0]<len(polys_ordered):\r\n if i[0]!=0:\r\n spline = vtk.vtkPolyData()\r\n spline.DeepCopy(rep.GetContourRepresentationAsPolyData())\r\n splines.append(spline)\r\n contourWidget.Initialize(polys_ordered[i[0]], 1)#, pts)\r\n contourWidget.Modified()\r\n obj.GetRenderWindow().Render()\r\n i[0]+=1\r\n elif key == 'Return':\r\n splines.append(rep.GetContourRepresentationAsPolyData())\r\n renderWindow.Finalize()\r\n interactor.TerminateApp()\r\n \r\n interactor.AddObserver(\"KeyPressEvent\", keyPressEvent)\r\n \r\n renderer.ResetCamera()\r\n renderWindow.Render()\r\n interactor.Initialize()\r\n interactor.Start()\r\n\r\n return splines", "def pick(o, p1, p2, event=None):\n\n\tinear = -1\n\tdmin = 1.e10\n\n\tfor i in range(0, len(o.spheres)):\n\t\ts = o.spheres[i]\n\t\td = glDistFromLine(s, p1, p2)\n\n\t\tif d < dmin:\n\t\t\tinear = i\n\t\t\tdmin = d\n\n\to.picked_sphere = inear\n\n\t\"\"\"If we want the viewer to redraw we return a true value.\"\"\"\n\n\treturn 1", "def __getSectionToOptimise(self):\r\n\r\n if self.sect_to_optimise == 'whole_signal':\r\n start_opt_index = 0\r\n end_opt_index = int(self.num_points) \r\n \r\n # create index and value arrays of points (particles K) to optimise\r\n K_index = np.asarray(list(range(start_opt_index, end_opt_index+1)))\r\n K = np.asarray([self.init_OP[start_opt_index:end_opt_index]])\r\n\r\n # transpose to column vectors \r\n K_index = K_index.transpose()\r\n K = K.transpose()\r\n\r\n return K_index, K", "def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose", "def on_select_clip_slot(self, clip_slot):\n pass", "def randselwave(sample, minlen=0, maxlen=None, nosilence=True):\n if nosilence:\n sig = rmsilence(sample)\n else:\n sig = sample.signal\n\n sigsize = len(sig)\n minoffset = int(minlen * sample.samplerate)\n maxoffset = min(int(maxlen*sample.samplerate),\n sigsize) if maxlen else sigsize\n\n assert (minoffset < maxoffset) and (minoffset <= sigsize), \\\n f\"\"\"BAD: siglen={sigsize}, minlen={minoffset}, maxlen={maxoffset}\"\"\"\n\n # Select begin sample\n ns = randrange(max(1, sigsize-minoffset))\n ne = randrange(ns+minoffset, min(ns+maxoffset, sigsize+1))\n\n return sig[ns:ne]", "def _pick_arms(self):\n return", "def masked_select(self, mask):\n return array_funcs.masked_select(self, 
mask)", "def removeCP(signal):\r\n return signal[L:(L+N)]", "def __call__(self,event):\n self.event=event\n #make a new point that is an PickEvent type\n npoint=event.artist\n #if the right button is clicked mask the point\n if event.mouseevent.button==3:\n #get the point that was clicked on\n ii=event.ind\n xd=npoint.get_xdata()[ii]\n yd=npoint.get_ydata()[ii]\n \n #set the x index from the frequency dictionary\n ll=self.fdict[self.fignum][self.jj]['{0:.5g}'.format(xd[0])]\n \n #change the data to be a zero\n self.data[self.fignum][self.jj][ll]=0\n \n #reset the point to be a gray x\n self.ax.plot(xd,yd,ls='None',color=(.7,.7,.7),marker=self.marker,\n ms=4)\n \n #if the left button is clicked change both resistivity and phase points\n elif event.mouseevent.button==1:\n #get the point that was clicked on\n ii=event.ind\n xd=npoint.get_xdata()[ii]\n yd=npoint.get_ydata()[ii]\n \n #set the x index from the frequency dictionary\n ll=self.fdict[self.fignum][self.jj]['{0:.5g}'.format(xd[0])]\n \n #set the data point to zero\n self.data[self.fignum][self.jj][ll]=0\n \n #reset the point to be a gray x\n self.ax.plot(xd,yd,ls='None',color=(.7,.7,.7),marker=self.marker,\n ms=4)\n \n #check to make sure there is a corresponding res/phase point\n try:\n #get the corresponding y-value \n yd2=self.data[self.fignum][self.kk][ll]\n \n #set that data point to 0 as well\n self.data[self.fignum][self.kk][ll]=0\n \n #make that data point a gray x\n self.axlst[self.fignum][self.kk].plot(xd,yd2,ls='None',\n color=(.7,.7,.7),marker=self.marker,\n ms=4)\n except KeyError:\n print('Axis does not contain res/phase point')\n \n #if click the scroll button or middle button change increase the \n #errorbars by the given amount\n elif event.mouseevent.button==2:\n ii=event.ind\n xd=npoint.get_xdata()[ii]\n yd=npoint.get_ydata()[ii]\n \n #get x index\n ll=self.fdict[self.fignum][self.jj]['{0:.5g}'.format(xd[0])]\n \n #make error bar array\n eb=self.errlst[self.fignum][self.jj][2].get_paths()[ll].vertices\n \n #make ecap array\n ecapl=self.errlst[self.fignum][self.jj][0].get_data()[1][ll]\n ecapu=self.errlst[self.fignum][self.jj][1].get_data()[1][ll]\n \n #change apparent resistivity error\n if self.jj==0 or self.jj==1:\n nebu=eb[0,1]-self.reserrinc*eb[0,1]\n nebl=eb[1,1]+self.reserrinc*eb[1,1]\n ecapl=ecapl-self.reserrinc*ecapl\n ecapu=ecapu+self.reserrinc*ecapu\n \n #change phase error\n elif self.jj==2 or self.jj==3:\n nebu=eb[0,1]-eb[0,1]*self.phaseerrinc\n nebl=eb[1,1]+eb[1,1]*self.phaseerrinc\n ecapl=ecapl-ecapl*self.phaseerrinc\n ecapu=ecapu+ecapu*self.phaseerrinc\n \n #put the new error into the error array \n self.error[self.fignum][self.jj][ll]=abs(nebu-\\\n self.data[self.fignum][self.jj][ll])\n \n #set the new error bar values\n eb[0,1]=nebu\n eb[1,1]=nebl\n \n #reset the error bars and caps\n ncapl=self.errlst[self.fignum][self.jj][0].get_data()\n ncapu=self.errlst[self.fignum][self.jj][1].get_data()\n ncapl[1][ll]=ecapl\n ncapu[1][ll]=ecapu\n \n #set the values \n self.errlst[self.fignum][self.jj][0].set_data(ncapl)\n self.errlst[self.fignum][self.jj][1].set_data(ncapu)\n self.errlst[self.fignum][self.jj][2].get_paths()[ll].vertices=eb\n \n #redraw the canvas\n self.ax.figure.canvas.draw()", "def randselphon(sample, phonfunc=None):\n (ns, ne), ph = sample.phonemeseq[randrange(len(sample.phonemeseq))]\n if phonfunc is not None:\n while not phonfunc(ph):\n (ns, ne), ph = sample.phonemeseq[randrange(len(sample.phonemeseq))]\n\n return sample.signal[ns:ne], ph" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the average level across all sentences. The levels are calculated according to the toolbox's reference level. Returns the mean and standard deviation of the levels.
def average_level(self): spl = [utils.dbspl(x) for x in self.load_files()] return np.mean(spl), np.std(spl)
[ "def rouge_l_sentence_level(eval_sentences, ref_sentences):\n\n f1_scores = []\n for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n m = float(len(ref_sentence))\n n = float(len(eval_sentence))\n lcs = _len_lcs(eval_sentence, ref_sentence)\n f1_scores.append(_f_lcs(lcs, m, n))\n return np.mean(f1_scores, dtype=np.float32)", "def powers_sumner_kearl_grade_level(total_words, total_sentences, total_syllables):\n average_sentence_length = total_words / total_sentences\n average_syllables_length = 100 * total_syllables / total_words\n return 0.0778 * average_sentence_length + .0455 * average_syllables_length - 2.2029", "def corpus_average_depth(corpus):\n local_averages = []\n for key in corpus:\n s = corpus[key]\n if average_depth(s) is not None:\n local_averages.append(average_depth(s))\n else:\n pass\n return float(sum(local_averages)) / len(local_averages)", "def _find_average_score(sentenceValue) -> int:\n sumValues = 0\n for entry in sentenceValue:\n sumValues += sentenceValue[entry]\n\n # Average value of a sentence from original text\n #average = (sumValues / len(sentenceValue))\n try:\n average = (sumValues / len(sentenceValue))\n except ZeroDivisionError:\n average = 0\n\n return average", "def get_line_score(self):\n score = 0\n\n if len(self.tokens) == 0:\n return score\n\n for token in [t[2] for t in self.tokens if not t[2] is None]:\n score += mean([s for s in token.values()])\n\n return score / len(self.tokens)", "def _find_average_score(sentenceValue) -> int:\n sumValues = 0\n for entry in sentenceValue:\n sumValues += sentenceValue[entry]\n\n # The average value of a sentence from original text is calculated by setting average to the\n # Sum divided by the len/value\n average = (sumValues / len(sentenceValue))\n\n return average", "def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average", "def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average", "def get_avg_sentence_length(self):\n sentences = self.blob.sentences\n average_sentence_length = np.mean(np.array([len(sentence.words) for sentence in sentences]))\n return average_sentence_length", "def average_depth(parse):\n depths = []\n current_depth = 0\n for token in parse.split():\n if token == '(':\n current_depth += 1\n elif token == ')':\n current_depth -= 1\n else:\n depths.append(current_depth)\n if depths:\n return float(sum(depths)) / len(depths)\n else:\n pass", "def get_average_gain(self):\n return self.average_gain", "def get_sentence_avg_value(self, sentence):\n\n all_values = self.get_sentence_values(sentence)\n\n try:\n avg = sum(all_values) / len(all_values)\n except ZeroDivisionError:\n avg = 0\n\n return avg", "def get_level(lines):\n return lines // 10", "def calculate_avg_cholesterol(self):\n total = 0\n no_of_valid_patients = 0\n for patient in self._patient_list:\n try:\n total += patient.get_cholesterol_data()[0]\n no_of_valid_patients += 1\n except AttributeError:\n continue\n except TypeError:\n continue\n if no_of_valid_patients == 0:\n return 0\n average = total/no_of_valid_patients\n self.average_cholesterol_level = average\n return average", "def averages():\r\n totalsubs = 0\r\n for sub in subs:\r\n 
totalsubs += sub\r\n avgsubs = totalsubs / len(subs)\r\n\r\n totalsent = 0\r\n for sent in sentiments:\r\n totalsent += sent\r\n avgsent = totalsent / len(sentiments)\r\n print('The average subjectivity is: ' + str(avgsubs))\r\n print('The average sentiment is: ' + str(avgsent))", "def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])", "def get_avg_word_count(self):\n for key, value in self.dict_word_count.items(): # Filling in the word count avg dictionary\n self.dict_avg_count[key] = round((value / self.count), 1)", "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)", "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate a new TypeDefer
def __init__(self, raw_defer: Dict): self.kind = raw_defer.get("kind") self.name = raw_defer.get("name") self.of_type: TypeDefer = TypeDefer(raw_defer.get("ofType")) if raw_defer.get("ofType") is not None else None
[ "def instantiate():\n d = defer.Deferred()", "def Instance(self) -> TypeManager:", "def __call__(self, *args):\n return TypeCall(self, args)", "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t", "def _construct(self, typ: Type[T]) -> T:\n sig = signature(typ.__init__)\n dependencies = []\n for param in islice(sig.parameters.values(), 1, None):\n inst = self.get(param.annotation)\n dependencies.append(inst)\n return typ(*dependencies)", "def _factory(*args_, **kwargs_):\n return DataSetType(*args_, **kwargs_)", "def create_type(apiId=None, definition=None, format=None):\n pass", "def create_type(self, **attrs):\n return self._create(_type.Type, **attrs)", "def makeDeferred(value):\n return SyncDeferred(value)", "def Reference(cls):\n return type(cls.__name__, (Typed, ), {\"type\": cls})", "def test_type_alias(systemcls: Type[model.System]) -> None:\n\n mod = fromText(\n '''\n from typing import Callable, Tuple, TypeAlias, TypeVar\n \n T = TypeVar('T')\n Parser = Callable[[str], Tuple[int, bytes, bytes]]\n mylst = yourlst = list[str]\n alist: TypeAlias = 'list[str]'\n \n notanalias = 'Callable[[str], Tuple[int, bytes, bytes]]'\n\n class F:\n from ext import what\n L = _j = what.some = list[str]\n def __init__(self):\n self.Pouet: TypeAlias = 'Callable[[str], Tuple[int, bytes, bytes]]'\n self.Q = q = list[str]\n \n ''', systemcls=systemcls)\n\n assert mod.contents['T'].kind == model.DocumentableKind.TYPE_VARIABLE\n assert mod.contents['Parser'].kind == model.DocumentableKind.TYPE_ALIAS\n assert mod.contents['mylst'].kind == model.DocumentableKind.TYPE_ALIAS\n assert mod.contents['yourlst'].kind == model.DocumentableKind.TYPE_ALIAS\n assert mod.contents['alist'].kind == model.DocumentableKind.TYPE_ALIAS\n assert mod.contents['notanalias'].kind == model.DocumentableKind.VARIABLE\n assert mod.contents['F'].contents['L'].kind == model.DocumentableKind.TYPE_ALIAS\n assert mod.contents['F'].contents['_j'].kind == model.DocumentableKind.TYPE_ALIAS\n\n # Type variables in instance variables are not recognized\n assert mod.contents['F'].contents['Pouet'].kind == model.DocumentableKind.INSTANCE_VARIABLE\n assert mod.contents['F'].contents['Q'].kind == model.DocumentableKind.INSTANCE_VARIABLE", "def create_a_class(directly=True):\n if directly:\n class Foo(object):\n i = 4\n\n class Bar(Foo):\n def get_i(self):\n return self.i\n\n return Bar\n else:\n Foo = type('Foo', (), dict(i=4))\n Bar = type('Bar', (Foo,), dict(get_i=lambda self: self.i))\n return Bar", "def define(**names):\n module = initialize(2)\n __deferred_definitions__ = module.__deferred_definitions__\n for name, specifier in names.items():\n __deferred_definitions__[name] = Deferred(name, specifier)", "def defer(self, *args, **kwargs):\n return DeferredRoutineCall(self, *args, **kwargs)", "def factory(type_or_name: str | type, singleton: bool = False) -> Callable[[T], T]:\n\n def _decorator(original: T) -> T:\n setattr(original, 'factory_provider', (type_or_name, singleton))\n return original\n\n return _decorator", "def _create_mock_type(filename=\"\", content=\"\", is_disable=False):\n mock_type = Mock(spec=Type)\n mock_type.filename = filename\n mock_type.content = content\n mock_type.id = ObjectId()\n mock_type.is_disabled = is_disable\n return mock_type", "def wrapped_unit(cls) -> MyType:\n MyType.clear_interning_cache()\n return MyType.decorate(MyUnit)", "def _factory(*args_, 
**kwargs_):\n return ObsType(*args_, **kwargs_)", "def __init__(self, line, context):\n match = Ftype_type_decl.type_match(line)\n if match is None:\n raise ParseSyntaxError(\"type declaration\", token=line, context=context)\n else:\n self._match_len = len(match.group(0))\n self._class = match.group(1)\n self._typestr = match.group(2)\n self._kind = self.typestr\n # End if" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new Schema instance. First, the schema will be loaded synchronously from the endpoint and stored as raw JSON for further processing. Then the request types will be parsed; these are "Query", "Mutation" and "Subscription". After that, the schema types and directives are parsed.
def __init__(self, endpoint: str, transporter: Transporter, settings: Settings, cache: Optional[Cache]): self.endpoint = endpoint self.transport = transporter self.settings = settings self.cache = cache if self.cache is not None: schema_introspection = self.cache.retrieve(self.endpoint, SCHEMA_KEY) if schema_introspection is None: schema_introspection = self.introspect_schema(endpoint, transporter) self.cache.store(self.endpoint, SCHEMA_KEY, schema_introspection) else: schema_introspection = self.introspect_schema(endpoint, transporter) # graphql schema properties self.raw_schema = schema_introspection.get(self.settings.default_response_key, {}).get("__schema", {}) self.query_type: str = self.parse_query_type(self.raw_schema) self.mutation_type: str = self.parse_mutation_type(self.raw_schema) self.subscription_type: str = self.parse_subscription_type(self.raw_schema) self.types: Dict[str, SchemaType] = self.parse_types(self.raw_schema.get("types", [])) self.directives: Dict[str, Directive] = self.parse_directives(self.raw_schema.get("directives", [])) # custom schema properties self.queries: Tuple[Operation] = self.parse_operations(self.query_type) self.mutations: Tuple[Operation] = self.parse_operations(self.mutation_type) self.subscriptions: Tuple[Operation] = self.parse_operations(self.subscription_type)
[ "def schema(self) -> AitoSchema:\n return self.schema_cls.from_deserialized_object(self._json)", "def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)", "def _get_schema(self):\n\n schema = ProtocolSchema()\n\n schema.id = self.id\n schema.type = type(self).__name__\n\n for input_path in self.required_inputs:\n\n if not (input_path.start_protocol is None or (input_path.start_protocol == self.id and\n input_path.start_protocol == input_path.last_protocol)):\n\n continue\n\n # Always make sure to only pass a copy of the input. Changing the schema\n # should NOT change the protocol.\n schema.inputs[input_path.full_path] = copy.deepcopy(self.get_value(input_path))\n\n return schema", "async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema", "def post_schema():\n return StructType([\n StructField(\"Accept\", StringType(), True),\n StructField(\"Host\", StringType(), True),\n StructField(\"User-Agent\", StringType(), True),\n StructField(\"event_type\", StringType(), True),\n StructField(\"Content-Length\", StringType(), True),\n StructField(\"Content-Type\", StringType(), True),\n StructField(\"attributes\", StringType(), True)\n ])", "def from_request(self, **extras):\n assert isinstance(self, Schema)\n\n from flask import json, request\n\n if request.content_type == 'application/json' and request.data:\n request_body = json.loads(request.data)\n else:\n request_body = {}\n\n content = {}\n\n for f in self.fields: # type: Field\n if f.has_value_in(extras):\n f.set_value_in(content, f.load(f.get_value_in(extras)))\n elif f.has_value_in(request.args):\n f.set_value_in(content, f.load(f.get_value_in(request.args)))\n elif f.has_value_in(request_body):\n f.set_value_in(content, f.load(f.get_value_in(request_body)))\n elif f.has_value_in(request.form):\n f.set_value_in(content, f.load(f.get_value_in(request.form)))\n elif f.forbidden:\n continue\n elif f.default is not Field.nothing:\n f.set_value_in(content, f.default)\n elif f.required:\n raise f.Missing(f.name, reason='required')\n\n if self.instance_factory is None:\n return content\n else:\n return self.instance_factory(**content)", "def schema(self):\n if not self._schema:\n response = self.api.make_request('GET', '%s/schema' % self.path)\n self._schema = response.data\n \n return self._schema", "def _CreateSchema(\n self,\n cls: Optional[TypeHinter],\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n if cls is None:\n raise ValueError(\"Trying to extract schema of None.\")\n\n if (inspect.isclass(cls) and issubclass(cls, rdf_structs.RDFProtoStruct)):\n cls = 
cls.protobuf.DESCRIPTOR\n\n type_name = _GetTypeName(cls)\n # \"Primitive\" types should be already present in `self.schema_objs`.\n if type_name in self.schema_objs:\n return\n\n if type_name in visiting:\n # Dependency cycle.\n return\n\n if isinstance(cls, FieldDescriptor):\n if _IsMapField(cls):\n self._CreateMapFieldSchema(cls, visiting)\n return\n\n descriptor = cls.message_type or cls.enum_type\n if descriptor:\n self._CreateSchema(descriptor, visiting)\n # else, this field is of a primitive type whose schema is already created.\n\n return\n\n if isinstance(cls, Descriptor):\n self._CreateMessageSchema(cls, visiting)\n return\n\n if isinstance(cls, EnumDescriptor):\n self._CreateEnumSchema(cls)\n return\n\n raise TypeError(f\"Don't know how to handle type \\\"{type_name}\\\" \"\n f\"which is not a protobuf message Descriptor, \"\n f\"nor an EnumDescriptor, nor a primitive type.\")", "def __init__(self, schema: str, **kwargs) -> None:\n self.schema = schema", "def create_schema() -> GraphSchema:\n return GraphSchema()", "def _get_schema(self):\n\n self._qcodes_dataset = DataSet(run_id=self.run_id, conn=self._conn)\n dep_params, indep_params = parameters_from_description(self.run_description)\n\n return Schema(\n datashape=None,\n dtype=None,\n shape=(self._dataset.number_of_results,), # not sure what else to do here\n npartitions= len(dep_params),\n extra_metadata={\n 'dataset_metadata': self._dataset.metadata,\n }\n )", "def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = concept.attrib['id']\n self.add_item_to_field_order(concept_id)\n concept_name = concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema", "def make_skeleton_schema(self):\n self.schema_from_scratch = True\n # Use Jinja to render the template schema file to a variable\n env = jinja2.Environment(\n loader=jinja2.PackageLoader(\"nf_core\", \"pipeline-template\"), keep_trailing_newline=True\n )\n schema_template = env.get_template(\"nextflow_schema.json\")\n template_vars = {\n \"name\": self.pipeline_manifest.get(\"name\", os.path.dirname(self.schema_filename)).strip(\"'\"),\n \"description\": self.pipeline_manifest.get(\"description\", \"\").strip(\"'\"),\n }\n self.schema = json.loads(schema_template.render(template_vars))\n self.get_schema_defaults()", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = self.alter_key(concept.attrib['id'])\n self.add_item_to_field_order(concept_id)\n concept_name = concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if 
parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema", "def api_node_schema(request):\n\n log.debug('schema requested')\n\n node = {\n }\n\n return node", "def schema(self):\n # NOTE This is exactly the same as the other thing.\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }", "async def _create_schema_request(\n self,\n public_info: DIDInfo,\n schema_json: str,\n write_ledger: bool = True,\n endorser_did: str = None,\n ):", "def create_schema(self, schema: str):\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the query type from the root schema. This can either return a string or None; the latter when the endpoint does not support queries.
def parse_query_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "queryType")
[ "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def parse_query_spec(self, query_spec):\n try:\n return self.QUERY_TYPE_MAP[query_spec['type']](query_spec)\n except KeyError:\n raise exceptions.QueryError('invalid query spec')\n except TypeError:\n raise exceptions.QueryError('Query must be a dictionary specifyng type and value of the query')", "def _schema_type(self) -> Optional[type]:\n pass", "def get_query_type(query):\n\n terms = query.split()\n # the query command may reveal itself a few terms into the command,\n # e.g. 'WITH RECURSIVE ... SELECT'. the command is a SELECT but can\n # only be classified by searching a few terms into the query.\n for start in range(0, min(4, len(terms))):\n # iterate with depth to include commands with multiple terms, e.g. 'SELECT INTO'\n for depth in range(4, 0, -1):\n if start+depth >= len(terms):\n continue\n cmd = \" \".join(terms[start:start+depth]).upper()\n if cmd in commands:\n return cmd.replace(\" \", \"_\").lower()\n return \"_unknown\"", "def _parse_type(self,uri):\n if uri:\n root_uri = uri.rpartition('/')[0]\n #print root_uri\n neo4j_type = root_uri.rpartition('/')[-1]\n return neo4j_type", "def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']", "def test_typename_queries(self):\n context = self._context()\n result = GraphQlExecutor.execute(\n \"query CheckTypeOfR2 {\\n\"\n \" hero {\\n\"\n \" __typename\\n\"\n \" name\\n\"\n \" }\\n\"\n \"}\\n\",\n context)\n self.assertEqual(\n {'data': {'hero': {'name': 'R2-D2', '__typename': 'Droid'}}},\n result)\n\n result = GraphQlExecutor.execute(\n \"query CheckTypeOfLuke {\\n\"\n \" hero(episode: EMPIRE) {\\n\"\n \" __typename\\n\"\n \" name\\n\"\n \" }\\n\"\n \"}\\n\",\n context)\n self.assertEqual(\n {\n 'data': {\n 'hero': {\n 'name': 'Luke Skywalker',\n '__typename': 'Human',\n },\n },\n }, result)", "def test_check_query_type(self):\n pass", "def query_template(self) -> Optional[str]:\n query_answer_dict = self.__get_query_answer_dict()\n if not query_answer_dict or 'sparql_template' not in query_answer_dict:\n return None\n assert type(query_answer_dict['sparql_template']) == str\n return query_answer_dict['sparql_template']", "def validate_query(self, query: dict):\n if \"type\" not in query:\n return False\n if not isinstance(query[\"type\"], str):\n return False\n if len(query) < 2:\n return False\n return True", "def parse_query(self, query_dict):\n if query_dict is None:\n return xapian.Query('') # Match everything\n elif query_dict == {}:\n return xapian.Query() # Match nothing\n\n query_tree = self.build_query_tree(query_dict)\n\n return query_tree.to_query(self.schema, self.database)", "def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"mutationType\")", "def schema_type(self):\n return self._schema.schema_type", "def _read_query(self):\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] not in '(@{':\n prev_offset = self._offset\n if self._read_identifier() != 'query':\n self._raise_exception('Expected \"query\"', prev_offset)\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] in '(@{':\n name = None\n else:\n name = self._read_identifier()\n self._read_ignored_tokens(False)\n\n variables = self._read_variables()\n directives = self._read_directives(GraphQlDirectiveLocation.query)\n 
selection_set = self._read_selection_set(\n self._schema.root_query_type())\n return GraphQlQuery(name, variables, selection_set, directives)", "def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")", "def make_query(graph, ns, request_schema, response_schema):\n @graph.route(\"/v1/foo/get\", Operation.Query, ns)\n @qs(request_schema)\n @response(response_schema)\n def foo_query():\n \"\"\"\n My doc string\n \"\"\"\n request_data = load_query_string_data(request_schema)\n response_data = dict(\n result=True,\n value=request_data[\"required_value\"],\n )\n return dump_response_data(response_schema, response_data, Operation.Query.value.default_code)", "def infer_value_type(self, value):\n if isinstance(value, str):\n if self.TIMESTAMP_MATCHER.match(value):\n return 'TIMESTAMP'\n elif self.DATE_MATCHER.match(value):\n return 'DATE'\n elif self.TIME_MATCHER.match(value):\n return 'TIME'\n elif not self.quoted_values_are_strings:\n # Implement the same type inference algorithm as 'bq load' for\n # quoted values that look like ints, floats or bools.\n if self.INTEGER_MATCHER.match(value):\n if int(value) < self.INTEGER_MIN_VALUE or \\\n self.INTEGER_MAX_VALUE < int(value):\n return 'QFLOAT' # quoted float\n else:\n return 'QINTEGER' # quoted integer\n elif self.FLOAT_MATCHER.match(value):\n return 'QFLOAT' # quoted float\n elif value.lower() in ['true', 'false']:\n return 'QBOOLEAN' # quoted boolean\n else:\n return 'STRING'\n else:\n return 'STRING'\n # Python 'bool' is a subclass of 'int' so we must check it first\n elif isinstance(value, bool):\n return 'BOOLEAN'\n elif isinstance(value, int):\n if value < self.INTEGER_MIN_VALUE or self.INTEGER_MAX_VALUE < value:\n return 'FLOAT'\n else:\n return 'INTEGER'\n elif isinstance(value, float):\n return 'FLOAT'\n elif value is None:\n return '__null__'\n elif isinstance(value, dict):\n if value:\n return 'RECORD'\n else:\n return '__empty_record__'\n elif isinstance(value, list):\n if value:\n return '__array__'\n else:\n return '__empty_array__'\n else:\n raise Exception('Unsupported node type: %s' % type(value))", "def get_query(self):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n if len(split) == 1: return \"\"\r\n else: return split[1]", "def _read_field_query(self, base_type):\n self._read_ignored_tokens(False)\n start = self._offset\n response_key = self._read_identifier()\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] != ':':\n field_name = response_key\n field_offset = start\n else:\n self._offset += 1\n self._read_ignored_tokens(False)\n field_offset = self._offset\n field_name = self._read_identifier()\n\n # Compute the GraphQlFieldDescriptor\n field_descriptor = self._schema.common_field_descriptor(field_name)\n if (field_descriptor is None and\n base_type == self._schema.root_query_type()):\n field_descriptor = self._schema.implicit_root_field_descriptor(\n field_name)\n if field_descriptor is None:\n try:\n field_descriptor = base_type.field_descriptor(field_name)\n except ValueError:\n self._raise_exception(\n '{:s} has no field named {:s}'.format(\n base_type.name, field_name),\n field_offset)\n\n args = self._read_args(field_descriptor.args)\n directives = self._read_directives(GraphQlDirectiveLocation.field)\n selection_set = self._read_selection_set(\n field_descriptor.field_type.base_type())\n field_query = GraphQlFieldQuery(\n response_key, field_descriptor, args, selection_set, directives)\n 
self._field_query_offsets[field_query] = start\n return field_query" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the mutation type from the root schema. This can either return a string or None; the latter when the endpoint does not support mutations.
def parse_mutation_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "mutationType")
[ "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def set_mutation_type(self, mut_type=''):\n if mut_type:\n # specified mutation type\n self.mutation_type = mut_type\n else:\n # interpret mutation type from attributes\n if not self.is_valid:\n # does not correctly fall into a category\n self.mutation_type = 'not valid'\n elif self.unknown_effect:\n self.mutation_type = 'unknown effect'\n elif self.is_missing_info:\n self.mutation_type = 'missing'\n elif self.is_substitution:\n self.mutation_type = 'substitution'\n elif self.is_deletion:\n self.mutation_type = 'deletion'\n elif self.is_insertion:\n self.mutation_type = 'insertion'\n\n # check if mutation at splice site\n self.__set_splice_mutation()", "def mutation(cls, tree):\n mutation = tree.child(0).value\n arguments = []\n if tree.arguments:\n arguments = cls.arguments(tree)\n if tree.command:\n mutation = tree.command.child(0).value\n return {'$OBJECT': 'mutation', 'mutation': mutation,\n 'arguments': arguments}", "def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")", "def _is_mutation_type(data):\n try:\n QuiverMutationType(data)\n return True\n except Exception:\n return False", "def _mutation_type_error(data):\n if data[2] is None:\n del data[2]\n return_str = str(data) + ' is not a valid quiver mutation type'\n return_str += '\\n Finite types have the form [ \\'?\\', n ] for type ? and rank n'\n return_str += '\\n Affine type A has the form [ \\'A\\', [ i, j ], 1 ] for rank i+j'\n return_str += '\\n Affine type ? has the form [ \\'?\\', k, \\pm 1 ] for rank k+1'\n return_str += '\\n Elliptic type ? 
has the form [ \\'?\\', k, [i, j] ] (1 <= i,j <= 3) for rank k+2'\n return_str += '\\n For correct syntax in other types, please consult the documentation.'\n\n raise ValueError(return_str)", "def _read_mutation(self):\n self._read_ignored_tokens(False)\n prev_offset = self._offset\n if self._read_identifier() != 'mutation':\n self._raise_exception('Expected \"mutation\"', prev_offset)\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] in '(@{':\n name = None\n else:\n name = self._read_identifier()\n self._read_ignored_tokens(False)\n variables = self._read_variables()\n directives = self._read_directives(GraphQlDirectiveLocation.mutation)\n mutation_type = self._schema.root_mutation_type()\n if mutation_type is None:\n self._raise_exception(\n 'The GraphQL schema does not have any mutations', prev_offset)\n selection_set = self._read_selection_set(mutation_type)\n return GraphQlMutation(name, variables, selection_set, directives)", "def _get_upload_mutation_name(self) -> str:\r\n return \"UploadMutation\"", "def get_random_mutation_type(self):\n return self.random_state.choice(\n self.mutation_types, p=self.mutation_probabilities)", "def input_type():\n return {\n 'clientMutationId': 'String!',\n 'name': 'String!',\n }", "def build_mutation(tree: ASTNode):\n\n if isinstance(tree.return_type, CreatePayloadType):\n return build_insert(tree)\n elif isinstance(tree.return_type, UpdatePayloadType):\n return build_update(tree)\n elif isinstance(tree.return_type, DeletePayloadType):\n return build_delete(tree)\n else:\n raise Exception(\"Unknown mutation type\")", "def _schema_type(self) -> Optional[type]:\n pass", "def _parse_type(self,uri):\n if uri:\n root_uri = uri.rpartition('/')[0]\n #print root_uri\n neo4j_type = root_uri.rpartition('/')[-1]\n return neo4j_type", "def getUnexpectedType(self):\n\n from antlr3.streams import TokenStream\n from antlr3.tree import TreeNodeStream\n\n if isinstance(self.input, TokenStream):\n return self.token.type\n\n elif isinstance(self.input, TreeNodeStream):\n adaptor = self.input.treeAdaptor\n return adaptor.getType(self.node)\n\n else:\n return self.c", "def _read_type(self):\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] == '[':\n self._offset += 1\n t = self._read_type()\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] != ']':\n self._raise_exception('Expected \"]\"', self._offset)\n self._offset += 1\n t = GraphQlListType(t)\n else:\n prev_offset = self._offset\n name = self._read_identifier()\n self._read_ignored_tokens(False)\n try:\n t = self._schema.get_type(name)\n except ValueError:\n self._raise_exception(\n 'There is no type named {:s}'.format(name), prev_offset)\n if self._document_str[self._offset] != '!':\n return t\n else:\n self._offset += 1\n return GraphQlNonNullType(t)", "def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")", "def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']", "def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")", "def get_token_type(token):\n return token[1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the subscription type from the root schema. This can either return a string or None; the latter when the endpoint does not support subscriptions.
def parse_subscription_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "subscriptionType")
[ "def subscription_type(self) -> str:\n return pulumi.get(self, \"subscription_type\")", "def smn_subscription_type(self):\n return self._smn_subscription_type", "def _get_subtlv_type(self):\n return self.__subtlv_type", "def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None", "def qualify_type(typ, namespace):\n\n if is_date_or_datetime(typ):\n return typ.split('.')[-1]\n elif is_avro_primitive(typ) or '.' in typ:\n return typ\n else:\n return '{0}.{1}'.format(namespace, typ)", "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def subtype_only(self):\n\n if self.subtype == None:\n return \"\"\n sTypeFull = self.get_subtype_display()\n arType = sTypeFull.split(\":\")\n return arType[1]", "def sub_service_type_name(self):\n return self._sub_service_type_name", "def prefix_type(self) -> Optional[str]:\n return pulumi.get(self, \"prefix_type\")", "def subscription_id(self) -> Optional[str]:\n return pulumi.get(self, \"subscription_id\")", "def sniff_type(text):\n for prefix in XML_PREFIXES:\n if text.startswith(prefix):\n return \"text/xml\"\n return None", "def resolve_topic_name(topic):\n\n if isinstance(topic, enum.Enum):\n return topic.value\n elif isinstance(topic, str):\n return topic\n else:\n return None", "def __str__(self):\n return self.subscription_type", "def get_xsd_type(self, item):\n if not self.xsd_types or isinstance(self.xsd_types, AbstractSchemaProxy):\n return\n elif isinstance(item, str):\n xsd_type = self.xsd_types.get(item)\n elif isinstance(item, AttributeNode):\n xsd_type = self.xsd_types.get(item[0])\n else:\n xsd_type = self.xsd_types.get(item.tag)\n\n if not xsd_type:\n return\n elif not isinstance(xsd_type, list):\n return xsd_type\n elif isinstance(item, AttributeNode):\n for x in xsd_type:\n if x.is_valid(item[1]):\n return x\n elif not isinstance(item, str):\n for x in xsd_type:\n if x.is_simple():\n if x.is_valid(item.text):\n return x\n elif x.is_valid(item):\n return x\n\n return xsd_type[0]", "def _schema_type(self) -> Optional[type]:\n pass", "def _parse_type(self,uri):\n if uri:\n root_uri = uri.rpartition('/')[0]\n #print root_uri\n neo4j_type = root_uri.rpartition('/')[-1]\n return neo4j_type", "def _read_type(self):\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] == '[':\n self._offset += 1\n t = self._read_type()\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] != ']':\n self._raise_exception('Expected \"]\"', self._offset)\n self._offset += 1\n t = GraphQlListType(t)\n else:\n prev_offset = self._offset\n name = self._read_identifier()\n self._read_ignored_tokens(False)\n try:\n t = self._schema.get_type(name)\n except ValueError:\n self._raise_exception(\n 'There is no type named {:s}'.format(name), prev_offset)\n if self._document_str[self._offset] != '!':\n return t\n else:\n self._offset += 1\n return GraphQlNonNullType(t)", "def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']", "def sub_resource_type_name(self):\n return self._sub_resource_type_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse an operation type from the root schema. This can either return a string or None; the latter when the endpoint does not support the operation type passed in.
def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]: query_type = raw_schema.get(op_type, {}) if not query_type: return None return query_type.get("name")
[ "def get_operation_type(self, operation_name):\n # type: (Optional[str]) -> Optional[str]\n operations_map = self.operations_map\n if not operation_name and len(operations_map) == 1:\n return next(iter(operations_map.values()))\n return operations_map.get(operation_name)", "def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")", "def parse_operations(self, operation_type: str) -> Tuple[Operation]:\n if operation_type is None:\n return tuple()\n query_type: SchemaType = self.types.get(operation_type)\n if query_type is None:\n return tuple()\n return tuple([Operation(f, self.settings) for f in query_type.fields])", "def operation_type(self):\n return self._operation_type", "def parseoptype(self, buf, size):\n opparse = self.proto.epp_single_op\n opparse.restype = OperationType\n opparse.argtypes = [ct.c_char_p, ct.c_uint]\n\n return opparse(buf, size)", "def _get_operation_from_string(op):\n\tif op in ['union','U','u']:\n\t\treturn _fuzzy_union\n\n\telif op in ['intersection','u']:\n\t\treturn _fuzzy_intersection\n\n\telif op in ['difference','diff','d']:\n\t\treturn _fuzzy_diff\n\n\telif op in ['addition','add','a']:\n\t\treturn _fuzzy_add", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def _find_operation_from_path(path, operation):\n for op in path:\n if op.type == operation:\n return op\n return None", "def get_op_type(self):\n return self.op_type", "def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"mutationType\")", "def _get_singa_op_type(cls, op):\n return type(op).__name__", "def _operation_to_pooling_type(operation):\n splitted_operation = operation.split('_')\n return splitted_operation[0]", "def _parse_type(self,uri):\n if uri:\n root_uri = uri.rpartition('/')[0]\n #print root_uri\n neo4j_type = root_uri.rpartition('/')[-1]\n return neo4j_type", "def unaryop_type(cls, op):\n return None", "def parse_operation(self, data, ip):\n json_decoded = json.loads(data)\n op = json_decoded['OPERATION']\n if op in self._callbacks:\n self.logger.info(\"Got Operation: \" + op)\n self._callbacks[op](json_decoded, ip)\n else:\n self.logger.error(\"Unknown operation\")", "def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")", "def get_op(self, op_complete_url):\n url_parsed = urlsplit(op_complete_url)\n op_url = url_parsed.path\n\n conf, op = self.best_match(op_url)\n if op is not None:\n return Operation(\n op_complete_url,\n op,\n conf[\"conf\"][op],\n conf[\"tp\"],\n conf[\"sparql_http_method\"],\n conf[\"addon\"],\n )\n else:\n sc = 404\n return (\n sc,\n \"HTTP status code %s: the operation requested does not exist\" % sc,\n \"text/plain\",\n )", "def _read_operation_or_fragment(self):\n self._read_ignored_tokens(False)\n if self._document_str[self._offset] in '{(@':\n return self._read_query()\n else:\n prev_offset = self._offset\n identifier = self._read_identifier()\n self._offset = prev_offset\n if identifier == 'query':\n return self._read_query()\n elif identifier == 'mutation':\n return self._read_mutation()\n elif identifier == 'fragment':\n self._read_fragment(True)\n return None\n else:\n self._raise_exception(\n 'Expected \"query\", \"mutation\", or \"fragment\"', prev_offset)", "def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n 
return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse all operations for a given operation type.
def parse_operations(self, operation_type: str) -> Tuple[Operation]: if operation_type is None: return tuple() query_type: SchemaType = self.types.get(operation_type) if query_type is None: return tuple() return tuple([Operation(f, self.settings) for f in query_type.fields])
[ "def parse_operation(self, data, ip):\n json_decoded = json.loads(data)\n op = json_decoded['OPERATION']\n if op in self._callbacks:\n self.logger.info(\"Got Operation: \" + op)\n self._callbacks[op](json_decoded, ip)\n else:\n self.logger.error(\"Unknown operation\")", "def parse(mode):\n operations = []\n\n operation = Operation()\n for _, char in enumerate(mode):\n if char in 'ugoa':\n _parse_source(char, mode, operation)\n continue\n\n if char in '-+=':\n operation = _parse_operator(char, mode, operation, operations)\n continue\n\n if char in BITS:\n _parse_bits(char, mode, operation)\n continue\n\n if not operation.source:\n operation.source = 'a'\n\n if not operation.operator:\n raise ValueError(\"Invalid string mode given: \" + mode)\n\n if char in FLAGS:\n _parse_flags(char, mode, operation)\n continue\n\n if char in COPY:\n _parse_copy(char, mode, operation)\n continue\n\n raise ValueError(\"Invalid string mode given: \" + mode)\n\n if not operation.target:\n raise ValueError(\"Invalid string mode given: \" + mode)\n\n if operation.target_mode != TARGET_MODE_BITS and not operation.operator:\n raise ValueError(\"Invalid string mode given: \" + mode)\n\n operations.append(operation)\n\n for operation in operations:\n if operation.target_mode == TARGET_MODE_BITS:\n operation.target = int(str(operation.target), 8) # to octal integer\n\n return operations", "def _ParseOp(self):\r\n left = self._term_parser()\r\n op = self._operator()\r\n commit()\r\n right = self._expr_parser()\r\n whitespace()\r\n node = self._op_classes[op](self._schema, left)\r\n return node.Merge(right)", "def parse_operations(resource, operations):\n for method in resource.methods:\n docstring = getattr(resource, method.lower()).__doc__\n if docstring:\n try:\n operation = yaml_utils.load_yaml_from_docstring(docstring)\n except yaml.YAMLError:\n operation = None\n if not operation:\n logging.getLogger(__name__).warning(\n 'Cannot load docstring for {}/{}'.format(resource, method))\n operations[method.lower()] = operation or dict()", "def _parse_operation_expression(expression):\n # Split groups by parentheses:\n # use the opening parenthesis as delimiter\n # and just remove the closing parenthesis\n expressions_per_step = expression.replace(\")\", \"\").split(\"(\")\n expressions_per_step = [e for e in expressions_per_step if len(e) > 0]\n # Important: Operations are applied from right to left\n expressions_per_step.reverse()\n\n operations = []\n for expr in expressions_per_step:\n if \"-\" in expr:\n # Range of operation IDs, they must be integers\n first, last = expr.split(\"-\")\n operations.append(\n [str(id) for id in range(int(first), int(last) + 1)]\n )\n elif \",\" in expr:\n # List of operation IDs\n operations.append(expr.split(\",\"))\n else:\n # Single operation ID\n operations.append([expr])\n\n # Cartesian product of operations\n return list(itertools.product(*operations))", "def _read_operations_and_fragments(self):\n operations = []\n operation_names = set()\n self._read_ignored_tokens(False)\n anonymous_offset = None\n while self._offset < len(self._document_str):\n prev_offset = self._offset\n operation = self._read_operation_or_fragment()\n if operation is not None:\n # Validate the \"one anonymous operation\" rule\n if operation.name is None and anonymous_offset is None:\n anonymous_offset = prev_offset\n if (operations and\n (operation.name is None or None in operation_names)):\n if operations:\n self._raise_exception(\n 'An operation may only be anonymous if it is the '\n 'only one',\n 
anonymous_offset)\n if operation.name in operation_names:\n self._raise_exception(\n 'There are multiple operations named {:s}'.format(\n operation.name),\n prev_offset)\n\n operations.append(operation)\n operation_names.add(operation.name)\n self._read_ignored_tokens(True)\n return operations", "def parse_operation(args):\n import argparse\n \n def to_bool(s):\n if s.lower() in (\"yes\", \"1\", \"true\", \"t\", \"y\"):\n return True\n return False\n\n parser = argparse.ArgumentParser(description=\"Operation and options\")\n parser.register('type', 'bool', to_bool)\n\n if not args:\n args = [\"backup\"]\n\n ops = parser.add_subparsers(help='sub-operation help', dest='command')\n\n # The current valid operations are backup, restore, list, verify, and delete\n # Although only backup and restore are currently implemented\n backup_operation = ops.add_parser(\"backup\", help=\"Backup command\")\n\n restore_operation = ops.add_parser(\"restore\", help='Restore command')\n\n verify_operation = ops.add_parser(\"verify\", help='Verify command')\n verify_operation.add_argument(\"--all\", action='store_true', dest='check_all',\n help='Check every backup for consistency',\n default=False)\n \n delete_operation = ops.add_parser('delete', help='Delete command')\n\n list_operation = ops.add_parser(\"list\", help='List command')\n \n rv = parser.parse_args(args)\n return rv", "def __create_elementary_operations(self, cmd_type: str, cmd_int: int):\n\n if cmd_type == \"G\":\n if cmd_int > 3:\n for operation in self.g_commands[str(cmd_int)]:\n self.elementary_operations.append(lambda: operation())\n else:\n path = self.__build_path()\n self.elementary_operations = self.__create_movement_operations(\n path)\n\n elif cmd_type == \"M\":\n for operation in self.m_commands[str(cmd_int)]:\n self.elementary_operations.append(lambda: operation())\n\n elif cmd_type == \"T\":\n for operation in self.t_commands[str(cmd_int)]:\n self.elementary_operations.append(lambda: operation())\n\n self.elementary_operations.append(lambda: self.__calibrate_tool())", "def classify_operations(self, length_edit, length_delete):\n for op in self.operations:\n # Classify the type according to the length of the operation\n len_op = op.get_length_of_op()\n if len_op >= length_edit:\n if len(op.elem_ops) == 1:\n op.type = 'paste'\n else:\n op.type = 'write'\n elif len_op <= -length_delete:\n op.type = 'delete'\n elif len(op.elem_ops) == 1 \\\n and op.elem_ops[0].operation_type == \"add\" \\\n and op.elem_ops[0].text_to_add == '\\n':\n op.type = 'jump'\n else:\n op.type = 'edit'", "def __operations(self, conf):\n result = \"\"\"## Operations [back to top](#toc)\nThe operations that this API implements are:\n\"\"\"\n ops = \"\\n\"\n\n for op in conf[\"conf_json\"][1:]:\n params = []\n for p in findall(PARAM_NAME, op[\"url\"]):\n p_type = \"str\"\n p_shape = \".+\"\n if p in op:\n p_type, p_shape = findall(\"^\\s*([^\\(]+)\\((.+)\\)\\s*$\", op[p])[0]\n\n params.append(\n \"<em>%s</em>: type <em>%s</em>, regular expression shape <code>%s</code>\"\n % (p, p_type, p_shape)\n )\n result += \"\\n* [%s](#%s): %s\" % (\n op[\"url\"],\n op[\"url\"],\n op[\"description\"].split(\"\\n\")[0],\n )\n ops += \"\"\"<div id=\"%s\">\n<h3>%s <a href=\"#operations\">back to operations</a></h3>\n\n%s\n\n<p class=\"attr\"><strong>Accepted HTTP method(s)</strong> <span class=\"attr_val method\">%s</span></p>\n<p class=\"attr params\"><strong>Parameter(s)</strong> <span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Result fields type</strong><span 
class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Example</strong><span class=\"attr_val\"><a target=\"_blank\" href=\"%s\">%s</a></span></p>\n<p class=\"ex attr\"><strong>Exemplar output (in JSON)</strong></p>\n<pre><code>%s</code></pre></div>\"\"\" % (\n op[\"url\"],\n op[\"url\"],\n markdown(op[\"description\"]),\n \", \".join(split(\"\\s+\", op[\"method\"].strip())),\n \"</li><li>\".join(params),\n \", \".join(\n [\n \"%s <em>(%s)</em>\" % (f, t)\n for t, f in findall(FIELD_TYPE_RE, op[\"field_type\"])\n ]\n ),\n conf[\"website\"] + conf[\"base_url\"] + op[\"call\"],\n op[\"call\"],\n op[\"output_json\"],\n )\n return markdown(result) + ops", "def op_parse_calls(text):\n\n # remove comments just for this call\n text = comment_remover(text)\n\n inits = len(re.findall('op_init', text))\n exits = len(re.findall('op_exit', text))\n parts = len(re.findall('op_partition', text))\n hdf5s = len(re.findall('hdf5', text))\n\n return (inits, exits, parts, hdf5s)", "def get_op_types(self):\n return self.cur_config['ops']", "def get_operation(self):\n\n operation = OrderedDict(\n tags=self.parser.get_tags(),\n summary=self.parser.get_summary(),\n description=self.parser.get_description(),\n parameters=self.parameters,\n produces=None,\n consumes=None,\n responses=self.responses,\n security=self.security\n )\n\n for key, value in list(operation.items()):\n # Remove empty keys\n if not value:\n operation.pop(key)\n\n return operation", "def _pull_argops(op_dict):\n import inspect\n out = []\n keys = op_dict.keys()\n keys.sort() # Not necessary, but makes scanning the printout easier\n for k in keys:\n # Create a dictionary that will be used to fill the 'code' template\n d = {}\n d[\"enum_name\"] = enum_name = op_dict[k][3:] # <NAME>\n d[\"funct_name\"] = \"%s\" % enum_name.lower() # <name>\n class_name = \"%s4args\" % enum_name\n klass = getattr(_type, class_name, None)\n if klass is None:\n # This operation takes no arguments\n d[\"funct_args\"] = d[\"create_args\"] = d[\"set_args\"] = \"\"\n else:\n if type(klass) is dict:\n arg_list = \"enum_value\"\n d[\"create_args\"] = \"args = enum_value\"\n else:\n arg_list = \", \".join(inspect.getargspec(klass.__init__)[0][1:])\n d[\"create_args\"] = \"args = _type.%s(%s)\" % (class_name, arg_list)\n d[\"funct_args\"] = arg_list\n if enum_name.startswith(\"CB_\"):\n d[\"set_args\"] = \"opcb%s=args\" % enum_name.lower()[3:]\n else:\n d[\"set_args\"] = \"op%s=args\" % enum_name.lower()\n if enum_name.startswith(\"CB_\"):\n d[\"argop\"] = \"nfs_cb_argop4\"\n else:\n d[\"argop\"] = \"nfs_argop4\"\n out.append(d)\n return out", "def load_operations(elem, symast, symimp):\n operations = elem.findall(\".//Operation\")\n return [load_operation(op, symast, symimp) for op in operations]", "def operations_map(self):\n # type: () -> Dict[Union[str, None], str]\n document_ast = self.document_ast\n operations = {} # type: Dict[Union[str, None], str]\n for definition in document_ast.definitions:\n if isinstance(definition, ast.OperationDefinition):\n if definition.name:\n operations[definition.name.value] = definition.operation\n else:\n operations[None] = definition.operation\n\n return operations", "def json_operations(self):\n if self.only_ops or self.only_virtual_ops:\n return self[\"operations\"]\n ops = []\n for tx in self[\"transactions\"]:\n for op in tx[\"operations\"]:\n if \"operations\" not in tx:\n continue\n # Replace opid by op name\n # op[0] = getOperationNameForId(op[0])\n if isinstance(op, list):\n op_new = list(op)\n else:\n op_new = 
op.copy()\n if 'timestamp' in op:\n p_date = op.get('timestamp', datetime(1970, 1, 1, 0, 0))\n if isinstance(p_date, (datetime, date)):\n op_new.update({'timestamp': formatTimeString(p_date)})\n ops.append(op_new)\n return ops", "def get_all_operations(self):\n raise NotImplementedError(\n 'operation get_all_operations(...) not yet implemented')", "def parse_inputs(input_nodes, ops):\n if len(input_nodes) != len(ops):\n raise ParseException('number of inputs did not match ops')\n\n wildcard = False\n\n if 'Any' in ops:\n if ops.index('Any') != len(ops) - 1:\n raise ParseException('`Any` must be last arg')\n wildcard = True\n ops.pop()\n\n out_nodes = []\n for op in ops:\n for node in input_nodes:\n if node not in out_nodes and node.op == op:\n out_nodes.append(node)\n\n if wildcard:\n for node in input_nodes:\n if node not in out_nodes:\n out_nodes.append(node)\n\n if len(out_nodes) != len(ops) + wildcard:\n raise ParseException('not enough nodes were matched')\n return out_nodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a list of arguments into a dictionary where the key is the name of the argument and the argument itself is the value.
def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]': if not args: return {} result = {} for a in args: if not a: continue arg = Argument(a) result[arg.name] = arg return result
[ "def split_args(args):\r\n if not args:\r\n return {}\r\n # Handle the old comma separated argument format.\r\n if len(args) == 1 and not re_new_args.search(args[0]):\r\n args = args[0].split(',')\r\n # Separate out the key and value for each argument.\r\n args_dict = {}\r\n for arg in args:\r\n split_arg = arg.split('=', 1)\r\n value = len(split_arg) > 1 and split_arg[1] or None\r\n args_dict[split_arg[0]] = value\r\n return args_dict", "def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result", "def parse(args: list, keyword_set: set) -> dict:\n parsed_dict = {'': []}\n while args:\n keyword = get_keyword(arg=args[0], keyword_set=keyword_set)\n\n if keyword is not None:\n args.pop(0)\n keyword_name = keyword.keyword_name\n\n if keyword_name in parsed_dict:\n raise necrobot.exception.DoubledArgException(keyword=keyword.keyword)\n\n if keyword.param_for is not None:\n parsed_dict[keyword_name] = [keyword.keyword]\n else:\n parsed_dict[keyword_name] = []\n num_args_pulled = 0\n while num_args_pulled < keyword.num_args:\n if not args:\n raise necrobot.exception.NumParametersException(\n keyword=keyword,\n num_expected=keyword.num_args,\n num_given=num_args_pulled\n )\n else:\n num_args_pulled += 1\n parsed_dict[keyword_name].append(args[0])\n args.pop(0)\n else:\n parsed_dict[''].append(args[0])\n args.pop(0)\n\n return parsed_dict", "def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary", "def parse_argv():\n d = {}\n l = []\n for v in sys.argv[1:]:\n if v[:2] == '--':\n try:\n k, v = v[2:].split('=')\n d[k] = v\n except ValueError:\n d[v[2:]] = None\n else:\n l.append(v)\n return d, l", "def parse_launch_arguments(launch_arguments: List[Text]) -> List[Tuple[Text, Text]]:\n parsed_launch_arguments = OrderedDict() # type: ignore\n for argument in launch_arguments:\n count = argument.count(':=')\n if count == 0 or argument.startswith(':=') or (count == 1 and argument.endswith(':=')):\n raise RuntimeError(\n \"malformed launch argument '{}', expected format '<name>:=<value>'\"\n .format(argument))\n name, value = argument.split(':=', maxsplit=1)\n parsed_launch_arguments[name] = value # last one wins is intentional\n return parsed_launch_arguments.items()", "def parse_arguments(args):", "def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict", "def parse_agent_args(args):\n if not args:\n return {}\n pieces = args.split(',')\n opts = {}\n for p in pieces:\n if '=' in p:\n key, val = p.split('=')\n else:\n key, val = p, 1\n opts[key] = val\n return opts", "def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")", "def parse_args(self, args):\n\t\targdict = {}\n\t\tflaglist = []\n\n\t\tfor arg in args:\n\t\t\tif \"=\" in arg:\n\t\t\t\ta = arg.split(\"=\", 1)\n\t\t\t\targdict[a[0]] = a[1]\n\t\t\telse:\n\t\t\t\tflaglist.append(arg)\n\n\t\tself.flags = 
flaglist\n\t\tself.args = argdict", "def arg_to_dict(arg, separator=','):\n if not arg:\n return {}\n result = dict(subString.split(\"=\") for subString in (arg).split(separator))\n return result", "def parse_cmdline_kwargs(args):\n\n def parse(v):\n assert isinstance(v, str)\n try:\n return eval(v)\n except (NameError, SyntaxError):\n return v\n\n return {k: parse(v) for k, v in parse_unknown_args(args).items()}", "def parse_generate_arguments(arguments):\n return_value = {}\n for key in arguments:\n return_value[key] = CONFIG_KEY_PARSER[key](arguments[key])\n\n return return_value", "def parse(self, arg_list):\n\n if self._meta.ignore_unknown_arguments is True:\n args, unknown = self.parse_known_args(arg_list)\n self.parsed_args = args\n self.unknown_args = unknown\n else:\n args = self.parse_args(arg_list)\n self.parsed_args = args\n return self.parsed_args", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def process_cli_config_args(config_args:List[str]) -> Dict:\n # assert len(config_args) % 3 == 0, \\\n # \"You should pass config args in [--config.arg_name arg_value arg_type] format\"\n assert len(config_args) % 2 == 0, \\\n \"You should pass config args in [--config.arg_name arg_value] format\"\n arg_names = [config_args[i] for i in range(0, len(config_args), 2)]\n arg_values = [config_args[i] for i in range(1, len(config_args), 2)]\n\n result = {}\n\n for name, value in zip(arg_names, arg_values):\n assert name.startswith(CONFIG_ARG_PREFIX), \\\n f\"Argument {name} is unkown and does not start with `config.` prefix. Cannot parse it.\"\n\n result[name[len(CONFIG_ARG_PREFIX):]] = infer_type_and_convert(value)\n\n return result", "def _parse_arguments(self, action, argv):\n arguments = {}\n leftover = []\n\n dash_dash_index = None\n try:\n dash_dash_index = argv.index('--')\n except ValueError:\n dash_dash_index = len(argv)\n maybe_positionals = argv[:dash_dash_index]\n positional_only = argv[dash_dash_index + 1:]\n\n positionals = []\n for arg in maybe_positionals:\n (leftover\n if self._is_option(arg)\n else positionals).append(arg)\n positionals += positional_only\n leftover += positionals[len(action.arguments):]\n\n args = zip(positionals, action.arguments.items())\n for arg, (name, mapper) in args:\n arguments[name] = mapper.__call__(arg)\n\n return arguments, leftover", "def parse_input_args(input_str: str):\n output_dict = {}\n if not input_str:\n raise ValueError(\"Empty input string: {}\".format(input_str))\n\n key_pairs: list = input_str.split(\",\")\n\n key_pairs = [x.strip() for x in key_pairs]\n\n if not key_pairs:\n raise ValueError(\"Incorrect format: {}\".format(input_str))\n\n for each_key in key_pairs:\n try:\n key, value = each_key.split(\"=\")\n except ValueError as value_error:\n raise ValueError(\"Expected input format \"\n \"'key1=value1, key2=value2' \"\n \"but received {}\".format(input_str)) \\\n from value_error\n if value.isdigit():\n value = int(value)\n output_dict[key] = value\n\n return output_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a list of directives into a dictionary where the key is the name of the directive and the value is the directive itself.
def parse_directives(schema_directives: List[Dict]) -> Dict[str, Directive]: result = {} for schema_directive in schema_directives: new_directive = Directive(schema_directive) result[new_directive.name] = new_directive return result
[ "def directives():\n cmd = \"{} -L\".format(_detect_os())\n ret = {}\n out = __salt__[\"cmd.run\"](cmd)\n out = out.replace(\"\\n\\t\", \"\\t\")\n for line in out.splitlines():\n if not line:\n continue\n comps = line.split(\"\\t\")\n desc = \"\\n\".join(comps[1:])\n ret[comps[0]] = desc\n return ret", "def _parse_pbs_directives(self):\n # Get general directives\n matches = re.findall('#PBS -(.*)', self.pbs_body)\n directives = {k: v for k, v in [(i.split() + [''])[:2] for i in matches]}\n # Get l directives\n l_matches = re.findall('#PBS -l (.*)', self.pbs_body)\n d = dict()\n for match in l_matches:\n if 'walltime' in match:\n d['walltime'] = match.split('=')[1]\n else:\n d.update({k: v for k, v in [i.split('=') for i in l_matches[0].split(':')]})\n\n directives['l'] = d\n return directives", "def directives(self, directive):\n signature_regex = compile(\"^\\w+:[\\w\\.]+:\\d+:[\\w\\.]+:[\\w/]+$\")\n\n if directive is None:\n raise ValueError(\"A directive name must be given.\")\n if not isinstance(directive, dict):\n raise TypeError(\"The directive name must be a dictionary, not %s.\" % (type(directive)))\n if 'signature' not in directive.keys():\n raise ValueError(\"A directive is expected to have a 'signature'.\")\n if not isinstance(directive['signature'], str):\n raise TypeError(\"The signature is expected as a string, not %s.\" % (type(directive['signature'])))\n if not signature_regex.match(directive['signature']):\n raise ValueError(\"A signature must have the following format: 'alias:ip:port:server_name:location'\")\n\n if directive not in self._directives:\n self._directives.append(directive)\n\n self._build()", "def parse_directive(line):\n composite = list()\n pointer = line.find(\"#\")\n composite.append(line[0: pointer])\n composite.append(line[pointer + 1: len(line) - 1])\n return composite", "def get_definitions(wlist):\n ddict = {}\n for word in wlist:\n text = get_def_page(word)\n defs = extract_defs(text)\n ddict[word] = defs\n return ddict", "def directives(self):\n return self._directives", "def read_preprocessing_directives(filename):\n r = {}\n with open(filename, 'r') as f:\n for line in f.readlines():\n line = line.strip()\n if line.startswith('#$'):\n if not '=' in line:\n logger.warning(\n 'Preprocessing directive does not contain \\'=\\'; skipping: \\'%s\\'',\n line\n )\n continue\n line = line.lstrip('#$')\n key, value = [c.strip() for c in line.split('=', 1)]\n r[key.lower()] = value\n continue\n return r", "def _parse(self, content):\n os.environ['ASTER_VERSION_DIR'] = self.dirn\n cfg = {}\n self._content = content\n for l in split_endlines(self._content):\n if not re.search('^[ ]*#', l):\n try:\n typ, nam, ver, val = l.split('|')\n #print '========>', typ, '//', nam, '//', ver, '//', val\n typ = re.sub('^[ ]*', '', re.sub('[ ]*$', '', typ)).strip()\n val = re.sub('^[ ]*', '', re.sub('[ ]*$', '', val)).strip()\n if val != '':\n val = osp.expandvars(val)\n if cfg.has_key(typ):\n cfg[typ].append(val)\n else:\n cfg[typ] = [val]\n except ValueError:\n pass\n return cfg", "def directive(dicts=None):\n global directive_names\n\n if isinstance(dicts, six.string_types):\n dicts = (dicts,)\n\n if not isinstance(dicts, Sequence):\n message = \"dicts arg must be list, tuple, or string. 
Found {0}\"\n raise TypeError(message.format(type(dicts)))\n\n # Add the dictionary names if not already there\n DirectiveMeta._directive_dict_names |= set(dicts)\n\n # This decorator just returns the directive functions\n def _decorator(decorated_function):\n directive_names.append(decorated_function.__name__)\n\n @functools.wraps(decorated_function)\n def _wrapper(*args, **kwargs):\n # Inject when arguments from the context\n if DirectiveMeta._when_constraints_from_context:\n # Check that directives not yet supporting the when= argument\n # are not used inside the context manager\n if decorated_function.__name__ == \"version\":\n msg = (\n 'directive \"{0}\" cannot be used within a \"when\"'\n ' context since it does not support a \"when=\" '\n \"argument\"\n )\n msg = msg.format(decorated_function.__name__)\n raise DirectiveError(msg)\n\n when_constraints = [\n spack.spec.Spec(x) for x in DirectiveMeta._when_constraints_from_context\n ]\n if kwargs.get(\"when\"):\n when_constraints.append(spack.spec.Spec(kwargs[\"when\"]))\n when_spec = spack.spec.merge_abstract_anonymous_specs(*when_constraints)\n\n kwargs[\"when\"] = when_spec\n\n # If any of the arguments are executors returned by a\n # directive passed as an argument, don't execute them\n # lazily. Instead, let the called directive handle them.\n # This allows nested directive calls in packages. The\n # caller can return the directive if it should be queued.\n def remove_directives(arg):\n directives = DirectiveMeta._directives_to_be_executed\n if isinstance(arg, (list, tuple)):\n # Descend into args that are lists or tuples\n for a in arg:\n remove_directives(a)\n else:\n # Remove directives args from the exec queue\n remove = next((d for d in directives if d is arg), None)\n if remove is not None:\n directives.remove(remove)\n\n # Nasty, but it's the best way I can think of to avoid\n # side effects if directive results are passed as args\n remove_directives(args)\n remove_directives(list(kwargs.values()))\n\n # A directive returns either something that is callable on a\n # package or a sequence of them\n result = decorated_function(*args, **kwargs)\n\n # ...so if it is not a sequence make it so\n values = result\n if not isinstance(values, Sequence):\n values = (values,)\n\n DirectiveMeta._directives_to_be_executed.extend(values)\n\n # wrapped function returns same result as original so\n # that we can nest directives\n return result\n\n return _wrapper\n\n return _decorator", "def parse_definitions(divs):\n definitions = []\n for div in divs:\n uls = div.find_all('ul', attrs={'class': 'list_search'})\n definitions.append([u' '.join(ul.stripped_strings) for ul in uls])\n return definitions", "def find_regions(directives):\n regions = {}\n for directive in directives:\n if directive.startswith(\"sequence-region\"):\n try:\n _, accession, start, end = directive.split(\" \")\n regions[accession] = (int(start), int(end))\n except ValueError:\n # likely sequence-region without coordinates\n pass\n return regions", "def parse_list(constant_list):\n\n values = dict()\n descriptions = dict()\n for (key, value, desc) in constant_list:\n values[key] = value\n descriptions[value] = desc\n return (values, descriptions)", "def export_commentary_text_as_dictionary(commentary_parts_list):\n verse_string = str(commentary_parts_list[0])\n header_string = str(commentary_parts_list[1])\n \n verse = re.search(r\"\\[(\\d+)\\]\", verse_string).group(1)\n header = re.search(r'\\<u\\>\\s*\"(.+)\"\\s*\\<\\/u\\>', header_string).group(1)\n\n 
commentary_text = commentary_parts_list[2].replace(\": \", \"\")\n key = verse + \"__\" + header\n \n return key, commentary_text.strip()", "def StartDirectives(self) -> CodeDirectiveCollection:", "def get_ud_fragments(pp):\n pred_deps = []\n arg2deps = {}\n for predicate in pp.instances:\n # Get dep parses for the predicate.\n for token in predicate.tokens:\n # (head, relation, dependent)\n if token.gov:\n dep = (token.gov.text, token.gov.position, token.gov_rel,\n token.text, token.position)\n else:\n dep = (None, None, token.gov_rel, token.text, token.position)\n pred_deps.append(dep)\n\n # Get dep parses for the arguments.\n for argument in predicate.arguments:\n arg_deps = []\n for token in argument.tokens:\n if token.gov:\n dep = (token.gov.text, token.gov.position, token.gov_rel,\n token.text, token.position)\n else:\n dep = (None, None, token.gov_rel, token.text,\n token.position)\n arg_deps.append(dep)\n arg2deps[argument.position] = arg_deps\n return pred_deps, arg2deps", "def to_dict(tags: list):\n result = {}\n for tag in tags:\n result[tag.name] = tag.get_text()\n return result", "def makeGcauCfgDictFromAgc(lineList): \r\n diction = {}\r\n withinCfgData = False\r\n for eachString in lineList:\r\n if re.match(RE_COMPILED_CFG_START, eachString):\r\n withinCfgData = True\r\n elif re.match(RE_COMPILED_CFG_END, eachString):\r\n withinCfgData = False\r\n elif withinCfgData:\r\n p = re.match(RE_COMPILED_CFG_ITEM, eachString)\r\n if p:\r\n obj = p.groups()[0]\r\n attr = p.groups()[1]\r\n val = p.groups()[2]\r\n if obj not in diction:\r\n diction[obj] = {}\r\n diction[obj][attr] = val\r\n return diction", "def _dict_from_list(taglist):\r\n return {tag['Key']: tag['Value'] for tag in taglist if tag} \\\r\n if taglist else {}", "def _process_directives(self, db):\n term = Term('_directive')\n directive_node = db.find(term)\n if directive_node is not None:\n directives = db.get_node(directive_node).children\n\n gp = LogicFormula()\n while directives:\n current = directives.pop(0)\n self.execute(current, database=db, context=self.create_context((), define=None),\n target=gp)\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a Unity Environment and a QNetwork, this method will generate a buffer of Experiences obtained by running the Environment with the Policy derived from the QNetwork.
def generate_trajectories( env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float ): # Create an empty Buffer buffer: Buffer = [] # Reset the environment env.reset() # Read and store the Behavior Name of the Environment behavior_name = list(env.behavior_specs)[0] # Read and store the Behavior Specs of the Environment spec = env.behavior_specs[behavior_name] # Create a Mapping from AgentId to Trajectories. This will help us create # trajectories for each Agents dict_trajectories_from_agent: Dict[int, Trajectory] = {} # Create a Mapping from AgentId to the last observation of the Agent dict_last_obs_from_agent: Dict[int, np.ndarray] = {} # Create a Mapping from AgentId to the last observation of the Agent dict_last_action_from_agent: Dict[int, np.ndarray] = {} # Create a Mapping from AgentId to cumulative reward (Only for reporting) dict_cumulative_reward_from_agent: Dict[int, float] = {} # Create a list to store the cumulative rewards obtained so far cumulative_rewards: List[float] = [] while len(buffer) < buffer_size: # While not enough data in the buffer # Get the Decision Steps and Terminal Steps of the Agents decision_steps, terminal_steps = env.get_steps(behavior_name) # For all Agents with a Terminal Step: for agent_id_terminated in terminal_steps: # Create its last experience (is last because the Agent terminated) last_experience = Experience( obs=dict_last_obs_from_agent[agent_id_terminated].copy(), reward=terminal_steps[agent_id_terminated].reward, done=not terminal_steps[agent_id_terminated].interrupted, action=dict_last_action_from_agent[agent_id_terminated].copy(), next_obs=terminal_steps[agent_id_terminated].obs[0], ) # Clear its last observation and action (Since the trajectory is over) dict_last_obs_from_agent.pop(agent_id_terminated) dict_last_action_from_agent.pop(agent_id_terminated) # Report the cumulative reward cumulative_reward = ( dict_cumulative_reward_from_agent.pop(agent_id_terminated) + terminal_steps[agent_id_terminated].reward ) cumulative_rewards.append(cumulative_reward) # Add the Trajectory and the last experience to the buffer buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated)) buffer.append(last_experience) # For all Agents with a Decision Step: for agent_id_decisions in decision_steps: # If the Agent does not have a Trajectory, create an empty one if agent_id_decisions not in dict_trajectories_from_agent: dict_trajectories_from_agent[agent_id_decisions] = [] dict_cumulative_reward_from_agent[agent_id_decisions] = 0 # If the Agent requesting a decision has a "last observation" if agent_id_decisions in dict_last_obs_from_agent: # Create an Experience from the last observation and the Decision Step exp = Experience( obs=dict_last_obs_from_agent[agent_id_decisions].copy(), reward=decision_steps[agent_id_decisions].reward, done=False, action=dict_last_action_from_agent[agent_id_decisions].copy(), next_obs=decision_steps[agent_id_decisions].obs[0], ) # Update the Trajectory of the Agent and its cumulative reward dict_trajectories_from_agent[agent_id_decisions].append(exp) dict_cumulative_reward_from_agent[agent_id_decisions] += ( decision_steps[agent_id_decisions].reward ) # Store the observation as the new "last observation" dict_last_obs_from_agent[agent_id_decisions] = ( decision_steps[agent_id_decisions].obs[0] ) # Generate an action for all the Agents that requested a decision # Compute the values for each action given the observation actions_values = ( q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy() ) # Pick 
the best action using argmax print("ACTION VALS", actions_values) actions_values += epsilon * ( np.random.randn(actions_values.shape[0], actions_values.shape[1]) ).astype(np.float32) actions = np.argmax(actions_values, axis=1) actions.resize((len(decision_steps), 1)) # Store the action that was picked, it will be put in the trajectory later for agent_index, agent_id in enumerate(decision_steps.agent_id): dict_last_action_from_agent[agent_id] = actions[agent_index] # Set the actions in the environment # Unity Environments expect ActionTuple instances. action_tuple = ActionTuple() action_tuple.add_discrete(actions) env.set_actions(behavior_name, action_tuple) # Perform a step in the simulation env.step() return buffer, np.mean(cumulative_rewards)
[ "def collect_gameplay_experiences(env, agent, buffer):\n state = env.reset()\n done = False\n while not done:\n action = agent.collect_policy(state)\n next_state, reward, done, _ = env.step(action)\n if done:\n reward = -1.0\n buffer.store_gameplay_experience(state, next_state,\n reward, action, done)\n state = next_state", "def run(self):\n # 3. Get initial state from environment\n obs = self.obs_preproc(self.env.reset())\n ep_reward = []\n for t in range(self.T):\n with torch.no_grad():\n qS_t = self.Q(torch.from_numpy(obs).unsqueeze(0).float())[2].squeeze().numpy()\n # 5. Select the action using the current policy\n action = self.policy(qS_t)\n # 6. Apply action in the environment\n next_obs, reward, done, _ = self.env.step(action)\n # 7. Add data to local buffer\n self.local_experience_buffer.add(Transition(obs, action, reward , self.gamma, qS_t))\n obs = self.obs_preproc(next_obs)\n ep_reward.append(reward)\n print(\"Actor#\", self.actor_id, \"t=\", t, \"action=\", action, \"reward:\", reward, \"1stp_buf_size:\", self.local_experience_buffer.B, end='\\r')\n\n if done: # Not mentioned in the paper's algorithm\n # Truncate the n-step transition as the episode has ended; NOTE: Reward is set to 0\n self.local_experience_buffer.construct_nstep_transition(Transition(obs, action, 0, self.gamma, qS_t))\n # Reset the environment\n obs = self.obs_preproc(self.env.reset())\n print(\"Actor#:\", self.actor_id, \"t:\", t, \" ep_len:\", len(ep_reward), \" ep_reward:\", np.sum(ep_reward))\n ep_reward = []\n\n # 8. Periodically send data to replay\n if self.local_experience_buffer.size >= self.params['n_step_transition_batch_size']:\n # 9. Get batches of multi-step transitions\n n_step_experience_batch = self.local_experience_buffer.get(self.params['n_step_transition_batch_size'])\n # 10.Calculate the priorities for experience\n priorities = self.compute_priorities(n_step_experience_batch)\n # 11. Send the experience to the global replay memory\n self.global_replay_queue.put([priorities, n_step_experience_batch])\n\n if t % self.params['Q_network_sync_freq'] == 0:\n # 13. 
Obtain latest network parameters\n self.Q.load_state_dict(self.shared_state[\"Q_state_dict\"])", "def load_experience(h5file):\n return Experience_Buffer(states=h5file['experience']['states'],actions=h5file['experience']['actions'],rewards=h5file['experience']['rewards'], advantages=h5file['experience']['advantages']\n )", "def append(self, experience: Experience) -> None:\n self.buffer.append(experience)", "def store_experience(self, obs, action, reward, done, obs_next):\n exp = np.concatenate([obs, [action], [reward], [done], obs_next])\n self.replay_buffer[self.exp_cnt%self.replay_buffer_sz, :] = exp\n self.exp_cnt += 1", "def store_experience(self, obs, action, reward, done, obs_next):\n exp = np.concatenate([obs, action, [reward], [done], obs_next])\n self.replay_buffer[self.exp_cnt%self.replay_buffer_sz, :] = exp\n self.exp_cnt += 1", "def add_experience(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n if self.num_replay_samples < self.buffer_size:\n self.buffer.append(e)\n self.num_replay_samples += 1\n else:\n # make some room, remove the oldest experience.\n self.buffer.popleft()\n self.buffer.append(e)\n return", "def accumulate_experience(teacher, exp_replay: Supervised_ExperienceReplay, config=student_config):\n\n env = gym.make(\"PongNoFrameskip-v4\")\n env = wrap_deepmind(env, frame_stack=True)\n steps = 0\n while 1:\n state = env.reset()\n state = np.asarray(state)\n done = False\n while not done:\n steps += 1\n teacher_q_value = teacher.get_q(state=np.reshape(state, (1, state.shape[0], state.shape[1], state.shape[2])))\n action = teacher.select_action(teacher_q_value)\n next_state, reward, done, _ = env.step(action + 1)\n next_state = np.asarray(next_state)\n exp_replay.add_memory(state, teacher_q_value, action) # feeding the experience replay\n state = next_state\n if steps > config.OBSERVE: # we have OBSERVE number of exp in exp_replay\n try:\n del env\n except ImportError:\n pass\n break", "def experience():\n return ExperienceFactory()", "def sample_trajectory(self, env, animate_this_episode, is_evaluation):\n # Using current task with meta inside\n env.reset_task(is_evaluation=is_evaluation)\n stats = []\n #====================================================================================#\n # ----------PROBLEM 2----------\n #====================================================================================#\n ep_steps = 0\n steps = 0\n\n num_samples = max(self.history, self.max_path_length + 1)\n meta_obs = np.zeros((num_samples + self.history + 1, self.meta_ob_dim))\n rewards = []\n\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.1)\n\n if ep_steps == 0:\n ob = env.reset()\n # first meta ob has only the observation\n # set a, r, d to zero, construct first meta observation in meta_obs\n # YOUR CODE HERE\n ac = np.zeros(self.ac_dim); rew = np.zeros(self.reward_dim); done = np.zeros(self.terminal_dim)\n meta_obs[steps, :] = np.concatenate((ob, ac, rew, done))\n steps += 1\n\n # index into the meta_obs array to get the window that ends with the current timestep\n # please name the windowed observation `in_` for compatibilty with the code that adds to the replay buffer (lines 418, 420)\n # YOUR CODE HERE\n # padding for input obs size\n sample_action_in_ = meta_obs[steps-self.history:steps, :] if steps>=self.history else np.squeeze(np.concatenate(([meta_obs[0,:], ] * (self.history - steps), meta_obs[:steps, :]), axis=0))\n # need to clear hidden size, in order to avoid previous 
hidden state as it may be generated by the other totally different task (env setting may be changed)\n hidden = np.zeros((1, self.gru_size), dtype=np.float32)\n\n # get action from the policy\n # YOUR CODE HERE\n # Tensor(\"ob:0\", shape=(?, 1, 10), dtype=float32)\n # print(self.sy_ob_no)\n # Tensor(\"hidden:0\", shape=(?, 32), dtype=float32)\n # print(self.sy_hidden)\n ac = self.sess.run(self.sy_sampled_ac, feed_dict={\n self.sy_ob_no: sample_action_in_.reshape(-1, self.history, self.meta_ob_dim),\n self.sy_hidden: hidden,\n })\n assert len(ac) == 1\n ac = ac[0]\n\n # step the environment\n # YOUR CODE HERE\n ob, rew, done, _= env.step(ac)\n\n ep_steps += 1\n\n done = bool(done) or ep_steps == self.max_path_length\n # construct the meta-observation and add it to meta_obs\n # YOUR CODE HERE\n meta_obs[steps, :] = np.concatenate((ob, ac, [rew], [done]))\n\n rewards.append(rew)\n steps += 1\n\n in_ = meta_obs[steps, :]\n # add sample to replay buffer\n if is_evaluation:\n self.val_replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n else:\n self.replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n\n # start new episode\n if done:\n # compute stats over trajectory\n s = dict()\n s['rewards']= rewards[-ep_steps:]\n s['ep_len'] = ep_steps\n stats.append(s)\n ep_steps = 0\n\n if steps >= num_samples:\n break\n\n return steps, stats", "def append_experience(self, state, action, reward, next_state, done):\n self.replay_buffer.append((state, action, reward, next_state, done))", "def get(self) -> []:\n # Generate numpy arrays out of the buffer\n observations_array: numpy.ndarray = numpy.stack(numpy.array(self._observations))\n actions_array: numpy.ndarray = numpy.stack(numpy.array(self._actions))\n log_likelihoods_array: numpy.ndarray = numpy.stack(numpy.array(self._log_likelihoods))\n # Prepare serialized list of all data in the buffer\n serialized_observations: [] = []\n serialized_actions: [] = []\n serialized_log_likelihoods: [] = []\n advantages: [] = []\n rewards_to_go: [] = []\n trajectory_start_pointer: numpy.ndarray = numpy.zeros(self._parallel_amount, dtype=int)\n for trajectory_index in range(len(self._end_trajectories_pointers)):\n pointer: numpy.ndarray = self._end_trajectories_pointers[trajectory_index]\n last_step_reward: numpy.ndarray = self._last_step_rewards[trajectory_index]\n for i in range(self._parallel_amount):\n # Get the trajectory slice for the current parallel episode\n trajectory_slice = slice(trajectory_start_pointer[i], pointer[i])\n # Compute rewards and values by appending the last step reward\n rewards: numpy.ndarray = numpy.array(numpy.array(self._rewards)[trajectory_slice, i].tolist() + [last_step_reward[i]])\n values: numpy.ndarray = numpy.array(numpy.array(self._values)[trajectory_slice, i].tolist() + [last_step_reward[i]])\n # Compute GAE-Lambda advantage estimation (compute advantages using the values in the buffer taken from the model)\n deltas: numpy.ndarray = rewards[:-1] + self._discount_factor * values[1:] - values[:-1]\n advantages += self._discount_cumulative_sum(deltas, self._discount_factor * self._lambda_parameter).tolist()\n # Compute rewards-to-go\n rewards_to_go += (self._discount_cumulative_sum(rewards, self._discount_factor)[:-1]).tolist()\n # Get the observations, actions and log likelihoods of the trajectory\n observations_trajectory: numpy.ndarray = observations_array[trajectory_slice]\n actions_trajectory: numpy.ndarray = actions_array[trajectory_slice]\n log_likelihoods_trajectory: numpy.ndarray = 
log_likelihoods_array[trajectory_slice]\n # Serialize all observations, actions and log likelihoods for all parallel episodes\n serialized_observations += observations_trajectory[:, i].tolist()\n serialized_actions += actions_trajectory[:, i].tolist()\n serialized_log_likelihoods += log_likelihoods_trajectory[:, i].tolist()\n trajectory_start_pointer = pointer.copy()\n # Get the numpy arrays out of all serialized data (plus advantages and rewards to go, already serialized)\n observations_array = numpy.array(serialized_observations)\n actions_array = numpy.array(serialized_actions)\n log_likelihoods_array = numpy.array(serialized_log_likelihoods)\n rewards_to_go_array: numpy.ndarray = numpy.array(rewards_to_go)\n advantages_array: numpy.ndarray = numpy.array(advantages)\n # Execute the advantage normalization trick\n # Note: make sure mean and std are not zero!\n advantage_mean: float = float(numpy.mean(advantages_array)) + 1e-8\n global_sum_squared: float = float(numpy.sum((advantages_array - advantage_mean) ** 2)) + 1e-8\n advantage_std: float = numpy.sqrt(global_sum_squared / advantages_array.size) + 1e-8\n # Adjust advantages according to the trick\n advantages_array = ((advantages_array - advantage_mean) / advantage_std)\n # Reset the buffer\n self._observations = []\n self._actions = []\n self._rewards = []\n self._values = []\n self._log_likelihoods = []\n self._last_step_rewards = []\n # Reset pointer\n self._pointer: numpy.ndarray = numpy.zeros(self._parallel_amount, dtype=int)\n self._end_trajectories_pointers: [] = []\n # Return all the buffer components\n return [observations_array, actions_array, advantages_array, rewards_to_go_array, log_likelihoods_array]", "def add(self, experience):\n self.buffer.append(experience)", "def __init__(self, agent, make_env=lambda:gym.make(\"SpaceInvaders-v0\"), n_games=1, max_size=None,\n preprocess_observation = lambda obs:obs,agent_step=None):\n if not isinstance(make_env, function):\n env_name = make_env\n make_env = lambda: gym.make(env_name)\n\n #create atari games\n self.make_env = make_env\n self.envs = [self.make_env() for _ in range(n_games)]\n self.preprocess_observation = preprocess_observation\n\n\n #initial observations\n self.prev_observations = [self.preprocess_observation(make_env.reset()) for make_env in self.envs]\n\n #agent memory variables (if you use recurrent networks\n self.prev_memory_states = [np.zeros((n_games,)+tuple(mem.output_shape[1:]),\n dtype=get_layer_dtype(mem))\n for mem in agent.agent_states]\n\n #save agent\n self.agent = agent\n self.agent_step = agent_step or agent.get_react_function()\n\n # Create experience replay environment\n self.experience_replay = SessionPoolEnvironment(observations=agent.observation_layers,\n actions=agent.action_layers,\n agent_memories=agent.agent_states)\n self.max_size = max_size\n\n #whether particular session has just been terminated and needs restarting\n self.just_ended = [False] * len(self.envs)", "def add(self, experience: []):\n if len(self.buffer) + len(experience) >= self.buffer_size:\n self.buffer[0:1] = []\n self.buffer.append(experience)", "def get_experiences():\n return Experience.query.all()", "def test_cloned_policy(env, cloned_policy, num_episodes=50, render=True):\n total_rewards = []\n\n for i in range(num_episodes):\n print('Starting episode {}'.format(i))\n total_reward = 0\n state = env.reset()\n if render:\n env.render()\n time.sleep(.01)\n is_done = False\n while not is_done:\n action = np.argmax(\n cloned_policy.predict_on_batch(state[np.newaxis, 
...])[0])\n state, reward, is_done, _ = env.step(action)\n total_reward += reward\n if render:\n env.render()\n time.sleep(.1)\n print(\n 'Total reward: {}'.format(total_reward))\n total_rewards.append(total_reward)\n\n mean = np.mean(total_rewards)\n std = np.std(total_rewards)\n print('Average total reward: {} (std: {})'.format(\n mean, std))\n\n return total_rewards", "def experiences(self):\n return self.client.call('GET',\n self.name + 'experiences')", "def init_reward(self):\n for i, p in enumerate(self.env):\n self.sample_rewards[i].append(p.sample())\n #self.num_played.append(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs an update of the QNetwork using the provided optimizer and buffer
def update_q_net( q_net: VisualQNetwork, optimizer: torch.optim, buffer: Buffer, action_size: int ): BATCH_SIZE = 1000 NUM_EPOCH = 3 GAMMA = 0.9 batch_size = min(len(buffer), BATCH_SIZE) random.shuffle(buffer) # Split the buffer into batches batches = [ buffer[batch_size * start : batch_size * (start + 1)] for start in range(int(len(buffer) / batch_size)) ] for _ in range(NUM_EPOCH): for batch in batches: # Create the Tensors that will be fed in the network obs = torch.from_numpy(np.stack([ex.obs for ex in batch])) reward = torch.from_numpy( np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1) ) done = torch.from_numpy( np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1) ) action = torch.from_numpy(np.stack([ex.action for ex in batch])) next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch])) # Use the Bellman equation to update the Q-Network target = ( reward + (1.0 - done) * GAMMA * torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values ) mask = torch.zeros((len(batch), action_size)) mask.scatter_(1, action, 1) prediction = torch.sum(qnet(obs) * mask, dim=1, keepdim=True) criterion = torch.nn.MSELoss() loss = criterion(prediction, target) # Perform the backpropagation optimizer.zero_grad() loss.backward() optimizer.step()
[ "def update_optimizer(self, context, optimizer, host):\n pass", "def _refresh_buffers(self) -> None:", "def reload(self,offline_buffer):\n #loading online buffer from offline buffer by sampling (online_buffer.buffer_size) samples \n self.buffer = SumTree(self.buffer_size)\n names, idxs = offline_buffer.sample_batch(self.buffer_size)\n self.offline_idxs = idxs\n state , action , reward, done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[0])\n #loop on names and load in the online buffer\n for i in range(len(names)-1):\n next_state , next_action , next_reward , done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[i+1])\n #done = 0\n self.memorize(state, action, reward, done, next_state, error=[1])\n state , action , reward = next_state , next_action , next_reward", "def update(src):", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def _update(self, opt, arg):\n self.cur_opt, self.cur_arg = opt, arg", "def update():\n data = update_queue.get().to_dict('list')\n source.stream(data)", "def update_vars(self, optimizer, epoch) -> None:\r\n\r\n\t\tif self.trainable:\r\n\t\t\tself.optimizations[\"weight\"] = optimizer.map_data(self.optimizations[\"weight\"], epoch)\r\n\t\t\tself.optimizations[\"bias\"] = optimizer.map_data(self.optimizations[\"bias\"], epoch)\r\n\r\n\t\t\tself.weights -= self.optimizations[\"weight\"][0]\r\n\t\t\tself.bias -= self.optimizations[\"bias\"][0]", "def _update_target_network(self):\n\n self.target_network.set_weights(self.q_network.get_weights())", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_weights(self, key, loss_differentiate, update_number, originial_weights):\n if (self.optimizer == 'gradient_descent'):\n self.weights[key] -= self.learning_rate * loss_differentiate\n\n elif (self.optimizer == 'gradient_descent_with_momentum'):\n self.GD_momentum_weight_update(key, loss_differentiate) \n\n elif (self.optimizer == 'RMSProp'):\n self.RMSProp_weight_update(key, loss_differentiate)\n\n elif (self.optimizer == 'NAG'):\n self.NAG_weight_update(key, loss_differentiate, originial_weights)\n\n elif (self.optimizer == 'AdaGrad'):\n self.AdaGrad_weight_update(key, loss_differentiate)\n \n elif (self.optimizer == 'Adam'):\n self.adam_weight_update(key, loss_differentiate, update_number)", "def set_optimizer(self, optimizer):\n\n pass", "def update_target_network(self):\n self.target_network.set_weights(self.q_network.get_weights())", "def update_policy(self):\n self.trainer_metrics.start_policy_update_timer(\n number_experiences=len(self.training_buffer.update_buffer[\"actions\"]),\n mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),\n )\n self.cumulative_returns_since_policy_update = []\n n_sequences = max(\n int(self.trainer_parameters[\"batch_size\"] / self.policy.sequence_length), 1\n )\n value_total, policy_total = [], []\n advantages = self.training_buffer.update_buffer[\"advantages\"].get_batch()\n self.training_buffer.update_buffer[\"advantages\"].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n )\n num_epoch = self.trainer_parameters[\"num_epoch\"]\n for _ in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(\n len(self.training_buffer.update_buffer[\"actions\"]) // n_sequences\n ):\n 
start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(\n buffer.make_mini_batch(start, end), n_sequences\n )\n value_total.append(run_out[\"value_loss\"])\n policy_total.append(np.abs(run_out[\"policy_loss\"]))\n self.stats[\"Losses/Value Loss\"].append(np.mean(value_total))\n self.stats[\"Losses/Policy Loss\"].append(np.mean(policy_total))\n for _, reward_signal in self.policy.reward_signals.items():\n update_stats = reward_signal.update(\n self.training_buffer.update_buffer, n_sequences\n )\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n if self.policy.bc_module:\n update_stats = self.policy.bc_module.update()\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n self.training_buffer.reset_update_buffer()\n self.trainer_metrics.end_policy_update()", "def update(self):\n # pull all available chunks\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t\n\n # update graph handles\n if self.gbuffer.any():\n for k in range(0, self.channel_count):\n self.handles[k].setData(self.gtimes,\n self.gbuffer[k::self.channel_count])", "def update_network_with_reward(self, reward):\n\n if reward != 0:\n\n total_decay = self.decay\n # starting from the end\n # print(\"len\", len(self.actions_history[\"obs\"]))\n for i in range(len(self.actions_history[\"obs\"])-1, -1, -1):\n # print(i)\n distances = np.multiply(self.actions_history[\"res\"][i], total_decay * reward)\n (grad_w, grad_b) = self.backprop(self.actions_history[\"obs\"][i], distances)\n\n # print(\"i\", i)\n # print(\"obs\\n\", self.actions_history[\"obs\"][i])\n # print(\"grad_w\\n\", grad_w)\n\n for i in range(self.nb_weights_matrix):\n # adjusting the values with the average gradient computed using the reward\n self.weights_layers[i] = self.weights_layers[i] + grad_w[i]\n self.biases_layers[i] = self.biases_layers[i] + grad_b[i]\n\n total_decay *= self.decay", "def update(oformat, param, stream, year, month, timestep, back, queue):\n ####I'm separating this in update and , so eventually update can check if no yr/mn passed or only yr passed which was the last month downloaded\n \n update = True\n if back:\n print('You cannot use the backwards option with update')\n sys.exit()\n if queue:\n dump_args(update, oformat, stream, list(param), year, list(month), timestep, back)\n else: \n api_request(update, oformat, stream, list(param), year, list(month), timestep, back)", "def update(self, gradient, optimizer=None, relink=None):\n # Recover the defaults, if missing\n optimizer = self._resolve_defaults(optimizer=optimizer)[0]\n # Set the gradient\n self.set_gradient(gradient, relink=(self._config.relink if relink is None else relink))\n # Perform the update step\n optimizer.step()", "def update_q(self, curr_value, target_value, batch):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for lback index self._in_loop becomes true in the second state of the loop
def _get_lback_index(self, model, last) -> int: assert last > 0 # last state cannot be loop-back. assert model.get_value(self.totime(self._in_loop, last)).is_true() assert model.get_value(self.totime(self._in_loop, 0)).is_false() idx = last - 1 while model.get_value(self.totime(self._in_loop, idx)).is_true(): idx -= 1 assert idx >= 0 assert model.get_value(self.totime(self._in_loop, idx + 1)).is_true() assert model.get_value(self.totime(self._in_loop, idx)).is_false() assert model.get_value(self.totime(self.start_loop, idx)).is_true() return idx
[ "def step_back():\r\n\r\n global index\r\n index -= 1", "def _is_looping(self):\n return self.loop_end > 0.0 and self.loop_end > self.loop_start", "def is_loop(self):\n return self.vertex_a == self.vertex_b", "def __should_loop(self):\n if self.loop:\n if self.num_of_loops == \"inf\":\n return True\n else:\n return self.current_loop >= self.num_of_loops\n else:\n return False", "def step_back_while(cur_index, condition):\n while cur_index >= 0 and condition(cur_index):\n cur_index -= 1\n return cur_index", "def forward_loop_exits(self):\n return self._forward_loop_exits", "def loop():\n global loop_idx\n sys.stdout.write('loop index %d/%d\\r\\n' % (loop_idx, _LOOPS))\n time.sleep(0.5)\n loop_idx += 1\n return loop_idx > _LOOPS", "def check_while(self, ind):\n self._vmcode += \"not\\nif-goto WHILE_END\" + str(ind) + \"\\n\"", "def has_loop(self, color):\n # if not self.is_legal():\n # return False\n starting_positions = self._tile_value.keys()[0]\n starting_tile = self._tile_value[starting_positions]\n current_length = 1\n target_length = len(SOLITAIRE_CODES)\n print starting_positions\n direction = starting_tile.index(color)\n next_tile_index = self.get_neighbor(starting_positions, direction)\n next_tile = self._tile_value.get(next_tile_index)\n if not next_tile:\n return False\n corresponding_index = reverse_direction(direction)\n next_direction = next_tile.index(color)\n if next_direction == corresponding_index:\n for idx in xrange(len(next_tile)):\n if next_tile[idx] == color and idx != corresponding_index:\n next_direction = idx\n break\n while True:\n current_length += 1\n if next_tile_index == starting_positions and current_length >= target_length:\n return True\n elif next_tile_index == starting_positions and current_length < target_length:\n return False\n next_tile_index = self.get_neighbor(next_tile_index, next_direction)\n next_tile = self._tile_value.get(next_tile_index)\n if not next_tile:\n return False\n direction = next_direction\n corresponding_index = reverse_direction(direction)\n next_direction = next_tile.index(color)\n if next_direction == corresponding_index:\n for idx in xrange(len(next_tile)):\n if next_tile[idx] == color and idx != corresponding_index:\n next_direction = idx\n break", "def stop(self):\r\n self.looping = 0", "def continueLoop(self):\n return(len(self.stages) > len(self.processedStages))", "def check_add_loop_list(self, loop):\n\n temp_length = len(self.list_loops)\n\n if temp_length < 1:\n self.list_loops.append(loop)\n return 0\n else:\n for i in range(temp_length):\n for j in self.list_loops[i]:\n if np.array_equal(j, loop[0]):\n return i\n self.list_loops.append(loop)\n return temp_length-1", "def _end_loop(self):\n loop = self.loop_stack.current_loop\n\n if loop.end_addr is None:\n loop.end_addr = self.program_counter_next\n\n # Increment the loop counter, test and jump if necessary\n self.reg[loop.reg] += loop.inc\n if self.reg[loop.reg] < loop.end_val:\n self.program_counter_next = loop.start_addr\n else:\n self.loop_stack.pop()", "def goingToBreak(self):\n \n if (\n (self.current_loc == 0 and not self.direction_forward) or\n (self.current_loc == len(self.destinations)-1 and self.direction_forward)\n ):\n return True\n return False", "def endloop(self):\n try:\n n, start = self._loop_stack[-1]\n except IndexError:\n print(\"No loops remaining.\")\n return\n if n == 1:\n self._loop_stack.pop()\n else:\n self._loop_stack[-1][0] -= 1\n self._pc = start", "def _is_at_end(self, binvect):\n last = max(k for (k, v) in enumerate(binvect) if v == 
1)\n n_step = len(self.pas)\n steps_between = np.arange(last + 1, n_step)\n if 0 <= len(steps_between) <= self._n_to_end:\n self._set_label(binvect, still)\n for k in steps_between:\n self.labels[k] = still\n return True\n else:\n return False", "def _loop_exits_early(loop):\n loop_nodes = (astroid.For, astroid.While)\n definition_nodes = (astroid.FunctionDef, astroid.ClassDef)\n inner_loop_nodes = [\n _node\n for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)\n if _node != loop\n ]\n return any(\n _node\n for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)\n if _get_break_loop_node(_node) not in inner_loop_nodes\n )", "def has_prev(self):\n return self.index > 0", "def step_back(self):\n if len(self.history) > 0:\n self.round, self.game_pointer, self.round_counter, self.dealer, self.public_cards, \\\n self.players, self.history_raises_nums = self.history.pop()\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stores in a random location in the Linked list
def add(self, item): if self.count == 0: random_location = 0 else: random_location = random.randint(0, self.count - 1) self.insert(Node(item), random_location)
[ "def random_location(self):\n return random.choice(self.locations_list)", "def copyRandomList(self, head: 'Node') -> 'Node': \n map_new = collections.defaultdict(lambda: Node(None, None, None))\n map_new[None] = None # if a node's next or random is None, their value will be None but not a new Node, doing so removes the if-else check in the while loop\n \n nd_old = head\n while nd_old:\n map_new[nd_old].val = nd_old.val\n map_new[nd_old].next = map_new[nd_old.next]\n map_new[nd_old].random = map_new[nd_old.random]\n nd_old = nd_old.next\n return map_new[head]", "def test_linked_list_insert_by_index():\n Iteration = 100\n M = 10\n for _ in range(Iteration):\n size = randint(0, M)\n L = [randint(-10, 10) for _ in range(size)]\n ll = LinkedList(L)\n ind = randint(0, size)\n num = randint(-1000, 1000)\n L.insert(ind, num)\n ll.insert_by_index(ind, num)\n assert ll[ind].data == L[ind]", "def getRandom(self):\n randnode = random.randint(0, self.count - 1)\n node = self.head\n for _ in range(randnode):\n node = node.next\n return node.val", "def randVacantPoint(L):\n pliste = vacantPoint(L)\n\n return pliste[random.randint(0, len(pliste)-1)]", "def copyRandomList(head: 'RandomNode') -> 'RandomNode':\n from collections import defaultdict\n def helper(node):\n if not node: return\n p = RandomNode(node.val, None, None)\n d[node.random].append(p)\n dr[node] = p\n p.next = helper(node.next)\n return p\n d = defaultdict(list)\n dr = {}\n p = helper(head)\n while head:\n if head in d:\n for q in d[head]:\n q.random = dr[head]\n head = head.next\n return p", "def test_linked_list_access():\n ll = LinkedList()\n with pytest.raises(IndexError):\n ll.front()\n with pytest.raises(IndexError):\n ll.back()\n \n Iteration = 100\n M = 10\n for _ in range(Iteration):\n size = randint(1, M)\n L = [randint(-100, 100) for _ in range(size)]\n ll = LinkedList(L)\n assert ll.front().data == L[0]\n assert ll.back().data == L[-1]", "def getRandom(self):\n index = random.randrange(0, self.length)\n node = self.head\n while index:\n node = node.next\n index -= 1\n return node.val", "def get_random_link(self):\n return tuple([random.randint(0, d-1) for d in self.link_idxs])", "def generate_items_position(self):\n while len(self.list_pos) != 3:\n self.list_pos = random.sample(self.level.paths, 3)\n for pos in self.list_pos:\n if pos == self.level.start:\n self.list_pos = []\n elif pos == self.level.exit:\n self.list_pos == []\n self.ether_pos = self.list_pos[0]\n self.needle_pos = self.list_pos[1]\n self.tube_pos = self.list_pos[2]", "def copyRandomList3(self, head: 'Node') -> 'Node':\n if not head:\n return None\n\n old_node = head\n # 将复制的新结点插入到两结点中间\n while old_node:\n # 深拷贝结点\n new_node = Node(old_node.val)\n # 将新结点插入到原始链表中,位置在前后两个结点的中间\n new_node.next = old_node.next\n old_node.next = new_node\n old_node = new_node.next\n\n old_node = head\n # 复制随机指针\n while old_node:\n if old_node.random:\n # 新结点 new_node = old_node.next\n # new_node.random 指向的结点是 old_node.random 指向的下一个结点,即 new_node.random = old_node.random.next\n old_node.next.random = old_node.random.next\n else:\n old_node.next.random = None\n old_node = old_node.next.next\n\n # 拆分和还原链表\n # 将 A -> A' -> B -> B' -> C -> C'\n # 拆分成原始链表 A -> B -> C 和深拷贝链表 A' -> B' -> C'\n old_node = head\n new_node = head.next\n dummy = Node(0, next=new_node)\n while old_node:\n old_node.next = old_node.next.next\n # 由于新链表的最后一个结点肯定是 new_node,所以需要对 new_node.next 是否为空进行判断,再进行赋值\n new_node.next = new_node.next.next if new_node.next else None\n old_node = old_node.next\n new_node = 
new_node.next\n return dummy.next", "def random_insert_seq(lst, seq):\n insert_locations = random.sample(range(len(lst) + len(seq)), len(seq))\n inserts = dict(zip(insert_locations, seq))\n iter_lst = iter(lst)\n lst[:] = [\n inserts[pos]\n if pos in inserts else next(iter_lst)\n for pos in range(len(lst) + len(seq))]", "def totem_random():\n random_head()\n random_head()\n random_head()", "def random_adjacent_unoccupied_location(self):\n return random.choice(self.adjacent_unoccupied_locations())", "def place_random(self):\n self.x = random.randint(1, self.width - OBJECT_SIZE)\n self.y = random.randint(1, self.height - OBJECT_SIZE)", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)", "def random_destination(self):\n # Select a point on the screen\n w, h = screen_size\n self.destination = Vector(randint(0, w), randint(0, h))", "def put_items(self,*maplist):\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))\n\n while maplist[self.position_y][self.position_x] == \"x\":\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))", "def topology_random_connect(self, probability):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tif not (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability:\n\t\t\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\t\t\tself.sites[j].neighbors.append(self.sites[i])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if c is a printable character. We do this by checking for ord value above 32 (space), as well as CR (\r), LF (\n) and tab (\t)
def is_printable(c): return ord(c)>=32 or c in ['\r','\n', '\t']
[ "def _is_printable(char):\n category = unicodedata.category(char)\n return (not category.startswith(\"C\") and\n (not category.startswith(\"Z\") or category == \"Zs\"))", "def is_printable_char(character):\n if character >= 32 and character <= 127:\n return True\n if character == 10 or character == 13:\n return True\n return False", "def is_printable(ch):\n return ch in string.digits + string.ascii_letters + string.punctuation", "def is_printable(s):\n for c in s:\n if isinstance(c, int):\n c = chr(c)\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True", "def IsPrintable(byte):\n return byte >= ord(' ') and byte <= ord('~')", "def is_printable(s):\n for c in s:\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True", "def is_printable(s):\n return all(c in string.printable for c in s)", "def isprint_ascii(char):\n return 0x20 <= ord(char) and ord(char) <= 0x7e", "def is_printable(b):\n return b in e(string.printable)", "def is_p4d_printable(c):\n if ord(c) < 0x20:\n return False\n if ord(c) == 0x7F:\n return False\n return True", "def ascii_printable(s: str) -> bool:\n return frozenset(s).issubset(_ascii_pa)", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def is_ascii(s):\r\n return all(ord(c) < 128 for c in s)", "def is_ascii(s):\n return all(ord(c) < 128 for c in s)", "def is_ascii(token):\n\n printable = set(string.printable)\n\n for char in token:\n if char not in printable:\n return False\n\n return True", "def __is_tib_letter(self, c):\n return c >= '\\u0F40' and c <= '\\u0FBC' and c != '\\u0F7F'", "def isthaichar(ch: str) -> bool:\n ch_val = ord(ch)\n if ch_val >= 3584 and ch_val <= 3711:\n return True\n return False", "def get_printable(text):\n try:\n if all(c in string.printable for c in text):\n return text\n except TypeError:\n if all(chr(c) in string.printable for c in text):\n return text\n \n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter control characters out of the string buf, given a list of control codes that represent backspaces, and a regex of escape sequences. backspaces are characters emitted when the user hits backspace. This will probably vary from terminal to terminal, and this list should grow as new terminals are encountered. escape_regex is a Regex filter to capture all escape sequences.
def sanitize(buf, backspaces=['\x08\x1b[K', '\x08 \x08'], escape_regex=re.compile(r'\x1b(\[|\]|\(|\))[;?0-9]*[0-9A-Za-z](.*\x07)?')): # Filter out control characters # First, handle the backspaces. for backspace in backspaces: try: while True: ind = buf.index(backspace) buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):])) except: pass strip_escapes = escape_regex.sub('',buf) # strip non-printable ASCII characters clean = ''.join([x for x in strip_escapes if is_printable(x)]) return clean
[ "def escape_buffer(self, buf):\n esc_byte = bytes({cb.ESC})\n esc_escaped = bytes({cb.ESC}) + bytes({cb.ESC ^ cb.ESC_XOR})\n\n start_byte = bytes({cb.START})\n start_escaped = bytes({cb.ESC}) + bytes({cb.START ^ cb.ESC_XOR})\n\n escaped = buf.replace(esc_byte, esc_escaped) \\\n .replace(start_byte, start_escaped)\n return escaped", "def restore_escapes(s):\n nonlocal escapes\n out = []\n i = 0\n while i < len(s):\n c = s[i]\n if c == chr(0):\n ndx = (ord(s[i+1])<<4) | ord(s[i+2])\n out.append(escapes[ndx])\n i += 2\n else:\n out.append(c)\n i += 1\n escapes = []\n return ''.join(out)", "def strip_control_characters(text):\n # Unicode control characters\n # [Cc]: Other, Control [includes new lines]\n # [Cf]: Other, Format\n # [Cn]: Other, Not Assigned\n # [Co]: Other, Private Use\n # [Cs]: Other, Surrogate\n control_chars = set(['Cc', 'Cf', 'Cn', 'Co', 'Cs'])\n retained_chars = ['\\t', '\\n', '\\r', '\\f']\n\n # Remove non-printing control characters\n return \"\".join([\"\" if (unicodedata.category(char) in control_chars) and (char not in retained_chars) else char for char in text])", "def escape(self, text, escape_chars):\n _bs = \"\\\\\"\n # backslash is always escaped\n text = text.replace(_bs, _bs * 2)\n for _el in escape_chars:\n assert _el != _bs, \"Backslash has been already escaped\"\n text = text.replace(_el, _bs + _el)\n return text", "def term_escapes_before(index):\n letters = []\n escapes = []\n for fragment in self.term.split_seqs(line):\n fraglen = self.term.length(fragment)\n assert fraglen in [0, 1]\n [escapes, letters][fraglen].append(fragment)\n if len(letters) > index:\n break\n return \"\".join(escapes)", "def escape(pattern):\n if isinstance(pattern, str):\n alphanum = _alphanum_str\n s = list(pattern)\n for i, c in enumerate(pattern):\n if c not in alphanum:\n if c == \"\\000\":\n s[i] = \"\\\\000\"\n else:\n s[i] = \"\\\\\" + c\n return \"\".join(s)\n else:\n alphanum = _alphanum_bytes\n s = []\n esc = ord(b\"\\\\\")\n for c in pattern:\n if c in alphanum:\n s.append(c)\n else:\n if c == 0:\n s.extend(b\"\\\\000\")\n else:\n s.append(esc)\n s.append(c)\n return bytes(s)", "def strip_from_ansi_esc_sequences(text):\n # esc[ + values + control character\n # h, l, p commands are complicated, let's ignore them\n seq_regex = r\"\\x1b\\[[0-9;]*[mKJusDCBAfH]\"\n regex = re.compile(seq_regex)\n start = 0\n response = \"\"\n for match in regex.finditer(text):\n end = match.start()\n response += text[start:end]\n\n start = match.end()\n response += text[start:len(text)]\n return response", "def eval_escapes(s):\n # by Rob Speer\n\n escape_sequence_re = re.compile(\n r'''\n ( \\\\U........ # 8-digit Unicode escapes\n | \\\\u.... # 4-digit Unicode escapes\n | \\\\x.. 
# 2-digit Unicode escapes\n | \\\\[0-7]{1,3} # Octal character escapes\n | \\\\N\\{[^}]+\\} # Unicode characters by name\n | \\\\[\\\\'\"abfnrtv] # Single-character escapes\n )''',\n re.UNICODE | re.VERBOSE\n )\n\n def decode_match(match):\n return codecs.decode(match.group(0), 'unicode-escape')\n\n return escape_sequence_re.sub(decode_match, s)", "def sanitize_bytes_control_chars(raw_bytes):\n try:\n return control_char_re.sub(\n '', raw_bytes.decode('utf-8')).encode('utf-8')\n except UnicodeDecodeError:\n return control_char_re.sub(\n '', raw_bytes.decode('cp1252')).encode('utf-8')", "def Control_EscapeMnemonics(*args, **kwargs):\n return _core_.Control_EscapeMnemonics(*args, **kwargs)", "def _escaped_text_from_text(text, escapes=\"eol\"):\n # TODO:\n # - Add 'c-string' style.\n # - Add _escaped_html_from_text() with a similar call sig.\n import re\n\n if isinstance(escapes, basestring):\n if escapes == \"eol\":\n escapes = {'\\r\\n': \"\\\\r\\\\n\\r\\n\", '\\n': \"\\\\n\\n\", '\\r': \"\\\\r\\r\"}\n elif escapes == \"whitespace\":\n escapes = {'\\r\\n': \"\\\\r\\\\n\\r\\n\", '\\n': \"\\\\n\\n\", '\\r': \"\\\\r\\r\",\n '\\t': \"\\\\t\", ' ': \".\"}\n elif escapes == \"eol-one-line\":\n escapes = {'\\n': \"\\\\n\", '\\r': \"\\\\r\"}\n elif escapes == \"whitespace-one-line\":\n escapes = {'\\n': \"\\\\n\", '\\r': \"\\\\r\", '\\t': \"\\\\t\", ' ': '.'}\n else:\n raise ValueError(\"unknown text escape style: %r\" % escapes)\n\n # Sort longer replacements first to allow, e.g. '\\r\\n' to beat '\\r' and\n # '\\n'.\n escapes_keys = escapes.keys()\n escapes_keys.sort(key=lambda a: len(a), reverse=True)\n\n def repl(match):\n val = escapes[match.group(0)]\n return val\n escaped = re.sub(\"(%s)\" % '|'.join([re.escape(k) for k in escapes_keys]),\n repl,\n text)\n\n return escaped", "def _create_char_spinner():\r\n while True:\r\n for c in '|/-\\\\':\r\n yield c", "def _get_compiled_pattern_list(self, pattern_list):\n compiled_list = []\n for pattern in pattern_list:\n try:\n compiled_pattern = re.compile(pattern, re.DOTALL | re.MULTILINE)\n compiled_list.append(compiled_pattern)\n except re.error as err:\n raise errors.DeviceError(\"Device {} expect failed. \"\n \"Invalid regex pattern {}. Error {!r}\".format(\n self._device_name, pattern, err))\n return compiled_list", "def remove_control_chars(json_string):\n return re.sub('[\\x00-\\x1f]', '',json_string)", "def test_re_handling_escape_char(self):\n string_for_regex = '%sing'\n regex = re_handling.ReHandling(string_for_regex)\n self.assertEqual(regex.pattern.pattern, '\\\\' + string_for_regex, 'regex test failed')", "def _create_char_spinner(self):\n while True:\n for c in '|/-\\\\':\n yield c", "def strip_ansi(text):\n # ansi_escape1 = re.compile(r'\\x1b[^m]*m')\n # text = ansi_escape1.sub('', text)\n # ansi_escape2 = re.compile(r'\\x1b\\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?')\n ansi_escape3 = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]', flags=re.IGNORECASE)\n text = ansi_escape3.sub('', text)\n return text", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def remove_control_characters(text, keep: Optional[str] = None):\n if keep is None:\n keep = [\"\\n\", \"\\t\", \"\\r\"]\n return \"\".join(c for c in text if c in keep or unicodedata.category(c)[0:2] != \"Cc\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tells the child process to resize its window
def resize_child_window(self): s = struct.pack('HHHH', 0, 0, 0, 0) x = fcntl.ioctl(0,termios.TIOCGWINSZ,s) fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)
[ "def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")", "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def OnWindowSetResizable(self, Resizable=sentinel):", "def triggerResize(self):\r\n self.parent.requestResize()", "def _resize_window(self):\n prop = self._auto[self._index].proportions()\n self._win = pygame.display.set_mode((self._cell_x * prop[1],\n self._cell_y * prop[0]))", "def set_window_size(self):\r\n\r\n try:\r\n window_size = self.console_buffer.GetConsoleScreenBufferInfo()['Window']\r\n coord = win32console.PyCOORDType(X = (self.cols*2), Y = self.rows)\r\n self.console_buffer.SetConsoleScreenBufferSize(coord)\r\n\r\n window_size.Right = (self.cols*2) - 1\r\n window_size.Bottom = self.rows - 1\r\n\r\n self.console_buffer.SetConsoleWindowInfo(\r\n Absolute=True, ConsoleWindow=window_size)\r\n except pywintypes.error:\r\n raise Exception('Some issue occured when resizing the console, '\r\n 'try decreasing the size or setting it back to default')", "def adjust_windowsize(self):\n system = Utility.get_os()\n if system == \"linux\":\n w, h, _ = self.get_screensize()\n wscale = 0.5\n hscale = 0.7\n self.resize(wscale * w, hscale * h)\n else:\n self.resize(800, 600)", "def resize_window(self, width, height):\n self.setGeometry(0,0,width,height)", "def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def resize(self, *args):\n if self.parent is None: # when deleted\n return\n if self.parent.render_window is None: # BasePlotter\n return\n\n if self._prior_window_size != self.parent.window_size:\n self._prior_window_size = self.parent.window_size\n\n actor = self._actors['background']\n image_data = actor.GetInput()\n origin = image_data.GetOrigin()\n extent = image_data.GetExtent()\n spacing = image_data.GetSpacing()\n xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]\n yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]\n yd = (extent[3] - extent[2] + 1) * spacing[1]\n dist = self.camera.distance\n\n # make the longest dimensions match the plotting window\n img_dim = np.array(image_data.dimensions[:2])\n self.camera.focus = np.array([xc, yc, 0.0])\n self.camera.position = np.array([xc, yc, dist])\n\n ratio = img_dim / np.array(self.parent.window_size)\n scale_value = 1\n if ratio.max() > 1:\n # images are not scaled if larger than the window\n scale_value = ratio.max()\n\n if self._scale is not None:\n scale_value /= self._scale\n\n self.camera.parallel_scale = 0.5 * yd / self._scale", "def set_win_size(self, sig, data):\n try:\n win_size = self.get_win_size()\n self.channel.resize_pty(height=win_size[0], width=win_size[1])\n except Exception:\n pass", "def PostSizeEventToParent(*args, **kwargs):\n return _core_.Window_PostSizeEventToParent(*args, **kwargs)", "def resize(self, yx=None):\n if yx == None:\n yx = self.screen.getmaxyx()\n self.screen.clear()\n curses.resizeterm(yx[0], yx[1])\n self.setup_windows(resize = True)\n self.screen.refresh()", "def ev_windowsizechanged(self, event: WindowResized) -> None:", "def windowResized(self, renderWindow):\n pass", "def set_window_size(w, h):\n _global_config.screen_w = w\n _global_config.screen_h = h", "def on_parent_resize(self, event):\n #self.resize()\n #self.resize_scaled(drag_rootx=self.resize_frame.winfo_rootx())\n 
self.resize_scaled(current=MathStat.lerp(0,\n self.prop_frame.winfo_width(), self.last_right_bias))", "def resizeEvent(self, *args, **kwargs):\n self.windowMoved.emit()", "def resize(self, width, height):\n\n\t\tself._window.resize(width, height)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Launch the appropriate shell as a login shell It will be either bash or tcsh depending on what the user is currently running. It checks the SHELL variable to figure it out.
def run_shell(): shell = get_shell() if shell not in ['bash','tcsh']: raise ValueError, "Unsupported shell (only works with bash and tcsh)" os.execvp(shell,(shell,"-l"))
[ "def get_default_shell():\n username = getpass.getuser()\n shell = pwd.getpwnam(username).pw_shell\n return shell", "def get_default_shell():\n if is_windows():\n return 'cmd.exe'\n else:\n import pwd\n import getpass\n\n if 'SHELL' in os.environ:\n return os.environ['SHELL']\n else:\n username = getpass.getuser()\n shell = pwd.getpwnam(username).pw_shell\n return shell", "def loginShell(self, shell=None):\n\n\t\tif shell is None:\n\t\t\traise exceptions.BadArgumentError(\n\t\t\t\t_(u'You must specify a shell'))\n\n\t\tif shell not in LMC.configuration.users.shells:\n\t\t\t\traise exceptions.BadArgumentError(_(u'Invalid shell \"{0}\". '\n\t\t\t\t\t'Valid shells are {1}.').format(stylize(ST_BAD, shell),\n\t\t\t\t\t', '.join(stylize(ST_COMMENT, shell)\n\t\t\t\t\t\tfor shell in LMC.configuration.users.shells)))\n\n\t\twith self.lock:\n\t\t\tself.__loginShell = shell\n\t\t\tself.serialize()\n\n\t\t\tLicornEvent('user_loginShell_changed', user=self.proxy).emit(priorities.LOW)\n\n\t\t\tlogging.notice(_(u'Changed user {0} shell to {1}.').format(\n\t\t\t\tstylize(ST_NAME, self.__login), stylize(ST_COMMENT, shell)))", "def on_use_login_shell_toggled(self, chk):\n self.client.set_bool(KEY('/general/use_login_shell'), chk.get_active())", "def get_user_shell():\n try:\n pw_shell = pwd.getpwuid(os.geteuid()).pw_shell\n except KeyError:\n pw_shell = None\n\n return pw_shell or '/bin/sh'", "def shell(app, shell_name, shell_path, shell_args): # no cov\n app.ensure_environment_plugin_dependencies()\n\n if app.env == app.env_active:\n app.abort(f'Already in environment: {app.env}')\n\n if app.env in app.project.config.matrices:\n app.display_error(f'Environment `{app.env}` defines a matrix, choose one of the following instead:\\n')\n for env_name in app.project.config.matrices[app.env]['envs']:\n app.display_error(env_name)\n\n app.abort()\n\n if not shell_name:\n shell_name = app.config.shell.name\n if not shell_path:\n shell_path = app.config.shell.path\n if not shell_args:\n shell_args = app.config.shell.args\n\n if not shell_path:\n import shellingham\n\n try:\n shell_name, command = shellingham.detect_shell()\n except shellingham.ShellDetectionFailure:\n from hatch.utils.fs import Path\n\n shell_path = app.platform.default_shell\n shell_name = Path(shell_path).stem\n else:\n if app.platform.windows:\n shell_path = command\n else:\n shell_path, *shell_args = app.platform.modules.shlex.split(command)\n\n with app.project.location.as_cwd():\n environment = app.get_environment()\n app.prepare_environment(environment)\n\n first_run_indicator = app.cache_dir / 'shell' / 'first_run'\n if not first_run_indicator.is_file():\n app.display_waiting(\n 'You are about to enter a new shell, exit as you usually would e.g. '\n 'by typing `exit` or pressing `ctrl+d`...'\n )\n first_run_indicator.parent.ensure_dir_exists()\n first_run_indicator.touch()\n\n environment.enter_shell(shell_name, shell_path, shell_args)", "def UpdateFromShell(self, shell):\n import koprocessutils\n assert sys.platform != \"win32\", \"'UpdateFromShell' is not available for Windows\"\n\n ##XXX Disable updating user environment from the shell. This can\n ## be a source of hangs! 
See bug 38216.\n #return \"updating user environment with shell is disabled\"\n\n if not shell:\n self._UpdateFromStartupEnv()\n koprocessutils.resetUserEnv()\n return\n if not (os.path.isfile(shell)):\n return \"given shell path does not exist: %r\" % shell\n\n # Determine what class of shell this is: bash, csh, etc.\n patterns = [ # pattern to match against basename\n (\"csh\", re.compile(\"csh\")),\n (\"korn\", re.compile(\"ksh\")),\n (\"zorn\", re.compile(\"zsh\")),\n (\"bash\", re.compile(\"bash|^sh$\")),\n (\"ash\", re.compile(\"^ash$\")),\n ]\n basename = os.path.basename(shell)\n type = None\n for name, pattern in patterns: \n if pattern.search(basename):\n type = name\n break\n else:\n return \"Don't know what kind of shell '%s' is. It doesn't look \"\\\n \"like any of Bash, csh, zorn or ash.\" % shell\n\n # Run the correct voodoo to get the environment from this shell.\n # the important part here is that the shell must be a login shell,\n # and should not be an interactive shell. interactive shells\n # have a tendency to lock up komodo (at least on tiger)\n stdin = None\n if ' ' in shell:\n return \"cannot yet handle spaces in shell path: %r\" % shell\n\n if type == \"bash\":\n # interactive bash locks up on tiger\n cmd = \"%s -lc printenv\" % shell\n\n elif type == \"csh\":\n # first *arg* is '-' or -l for login shell, so we must use stdin\n cmd = \"%s -l\" % shell\n stdin = \"printenv\"\n\n # all other shell man pages I looked at say \n # first char of arg 0 is - means login shell, so -c printenv should\n # be fine.\n else:\n cmd = \"%s -c printenv\" % shell\n\n stdout, stderr, retval = run(cmd, stdin)\n if retval:\n return \"error getting environment from '%s'\" % cmd\n if not stdout:\n return \"no stdout received from printenv\"\n self._userEnviron = parse_bash_set_output(stdout)\n koprocessutils.resetUserEnv()", "def open_shell():\n app_ctx = AppContext()\n shell_ctx = globals()\n shell_ctx.update(vars(app_ctx))\n code.interact(local=shell_ctx)\n return", "def on_default_shell_changed(self, combo):\n citer = combo.get_active_iter()\n if not citer:\n return\n shell = combo.get_model().get_value(citer, 0)\n # we unset the value (restore to default) when user chooses to use\n # user shell as guake shell interpreter.\n if shell == USER_SHELL_VALUE:\n self.client.unset(KEY('/general/default_shell'))\n else:\n self.client.set_string(KEY('/general/default_shell'), shell)", "def shell():\n open_shell()", "def getEnv(self):\n\n self.shell = os.getenv('SHELL').split('/')[-1]\n return", "def shell(self):\n logger.debug('Opening shell inside container environment')\n subprocess.call(\n self._container_run_command() + [\n 'bash',\n ]\n )", "def shell(self):\r\n channel = self._ssh_client.invoke_shell()\r\n interactive_shell(channel)", "def get_shell(self):\n if self.shell is None:\n self.shell = self.internal_shell\n\n return self.shell", "def start_interactive_shell(self, pty, command=None, logger=None, initial_input=None):\n self.run(pty, command='/bin/bash', logger=logger, initial_input=initial_input)", "def runner_login():\n instance = RunnerInstance.instance()\n run_remote_shell(instance)", "def detect_shell() -> Optional[str]:\n shell_var = os.environ.get('SHELL')\n if shell_var:\n return os.path.basename(shell_var)\n return None", "def run_shell(kit):\n context = {\n 'kit': kit,\n }\n try:\n import IPython\n except ImportError:\n interact(local=context)\n else:\n interactive_shell = IPython.frontend.terminal.embed.InteractiveShellEmbed()\n interactive_shell(local_ns=context)", 
"def command_shell(\n session_name,\n window_name,\n socket_name,\n socket_path,\n command,\n shell,\n use_pythonrc,\n use_vi_mode,\n):\n server = Server(socket_name=socket_name, socket_path=socket_path)\n\n util.raise_if_tmux_not_running(server=server)\n\n current_pane = util.get_current_pane(server=server)\n\n session = util.get_session(\n server=server, session_name=session_name, current_pane=current_pane\n )\n\n window = util.get_window(\n session=session, window_name=window_name, current_pane=current_pane\n )\n\n pane = util.get_pane(window=window, current_pane=current_pane) # NOQA: F841\n\n if command is not None:\n exec(command)\n else:\n if shell == \"pdb\" or (os.getenv(\"PYTHONBREAKPOINT\") and PY3 and PYMINOR >= 7):\n from tmuxp._compat import breakpoint as tmuxp_breakpoint\n\n tmuxp_breakpoint()\n return\n else:\n from ..shell import launch\n\n launch(\n shell=shell,\n use_pythonrc=use_pythonrc, # shell: code\n use_vi_mode=use_vi_mode, # shell: ptpython, ptipython\n # tmux environment / libtmux variables\n server=server,\n session=session,\n window=window,\n pane=pane,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the name of the directory that will store the logfiles. If the SHELLLOGGERDIR environment variable is set, use that. Otherwise, default to ~/.shelllogger
def get_log_dir(): env_var = "SHELLLOGGERDIR" if os.environ.has_key(env_var): return os.environ[env_var] else: return os.path.expanduser('~/.shelllogger')
[ "def get_logging_dir(self):\n return self.logging_dir", "def platform_log_directory():\n\n LOG_DEFAULTS = {\n 'Darwin': os.path.expanduser('~/Library/Logs/Plex Media Server.log'),\n 'Linux': '/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Logs/Plex Media Server.log',\n 'Windows': os.path.join(os.environ.get('LOCALAPPDATA', 'c:'), 'Plex Media Server/Logs/Plex Media Server.log'),\n 'FreeBSD': '/usr/local/plexdata/Plex Media Server/Logs/Plex Media Server.log',\n }\n\n return LOG_DEFAULTS[platform.system()]", "def platform_log_directory():\n\n log_defaults = {\n 'Darwin': os.path.expanduser('~/Library/Logs/Plex Media Server.log'),\n 'Linux': '/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Logs/Plex Media Server.log',\n 'Windows': os.path.join(os.environ.get('LOCALAPPDATA', 'c:'), 'Plex Media Server/Logs/Plex Media Server.log'),\n 'FreeBSD': '/usr/local/plexdata/Plex Media Server/Logs/Plex Media Server.log',\n }\n\n return log_defaults[platform.system()]", "def _getLogDir(self):\n return 'Log'", "def get_logdir(self):\n return self.event_writer.get_logdir()", "def get_logging_dir(appname='default'):\n from utool._internal import meta_util_cache\n from utool._internal import meta_util_cplat\n from utool import util_cache\n if appname is None or appname == 'default':\n appname = util_cache.get_default_appname()\n resource_dpath = meta_util_cplat.get_resource_dir()\n default = join(resource_dpath, appname, 'logs')\n # Check global cache for a custom logging dir otherwise\n # use the default.\n log_dir = meta_util_cache.global_cache_read(logdir_cacheid,\n appname=appname,\n default=default)\n log_dir_realpath = realpath(log_dir)\n return log_dir_realpath", "def get_pipe_logdir():\n return \"logs\"", "def get_log_path() -> Union[str, None]:\n return os.path.dirname(__logfile)", "def _default_log_dir():\n config_dir = os.path.abspath(os.path.dirname(self.config_filepath))\n log_dir = os.path.join(config_dir, \"logs\")\n if not os.path.isdir(log_dir):\n os.mkdir(log_dir)\n return log_dir", "def get_log_prefix_dir():\n return _log_prefix_dir", "def get_log_path():\n return LOG_PATH", "def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())", "def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)", "def log_filename():\n # use the env variable if set, or fallback to default\n return os.environ.get('NBAUTOEVAL_LOG') \\\n or os.path.join(os.getenv(\"HOME\"), \".nbautoeval\")", "def get_logging_config():\n\n return os.path.normpath(os.path.join(os.path.dirname(__file__), '__logging__.ini'))", "def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file", "def new_custom_log_dir(self) -> str:", "def get_system_logfile():\n return \"system\" + get_day() + \".log\"", "def get_default_log():\n name = os.path.basename(sys.argv[0])\n pos = name.rfind('.')\n if pos != -1:\n name = name[:pos]\n name = os.path.abspath(os.path.join(ROOT, '../../Log/%s.log' % name))\n return name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert the .raw file, with illegal characters and escape keys, to a proper XML version. Returns the name of the XML file
def raw_to_xml(self): xmlfilename = self.logfilename.replace('.raw','.xml') fout = codecs.open(xmlfilename, encoding="utf-8", mode="w") for line in codecs.open(self.logfilename,encoding="utf-8"): fout.write(sanitize(line)) fout.close() return xmlfilename
[ "def _format_as_xml(self, txt_file_path, output_path):\n base_name = os.path.splitext(os.path.basename(txt_file_path))[0]\n xml_file_path = os.path.join(output_path, base_name + '.xml')\n if os.path.exists(xml_file_path):\n LOG.info(\"File already exist %s\", xml_file_path)\n return xml_file_path\n # Read in the file\n with open(txt_file_path, 'r', encoding=\"utf-8\") as f:\n lines = f.readlines()\n # process the file to fix illegal xml\n tags = ['<summary>', '<short_text>']\n filedata = []\n for i in range(len(lines)):\n if i != 0 and any(tag in lines[i-1] for tag in tags):\n # fix escape characters in content\n filedata.append(escape(lines[i]))\n elif 'doc' in lines[i]:\n # Replace <doc id=1> to <doc id=\"1\"> to complie with xml format\n fixed = re.sub(r'<doc id=([0-9]+)>', r'<doc id=\"\\1\">', lines[i])\n filedata.append(fixed)\n else:\n filedata.append(lines[i])\n # Write the file out again\n with open(xml_file_path, 'w') as f:\n f.write(\"<?xml version=\\\"1.0\\\"?>\\n<data>\\n\")\n f.writelines(filedata)\n f.write(\"\\n</data>\")\n LOG.info(\"Wrote sanitized xml file to %s\", xml_file_path)\n return xml_file_path", "def fix_xml(filename):\n io = StringIO()\n io.write(\"<root>\")\n with open(filename) as f:\n for line in f:\n if \"xml version\" in line:\n continue\n io.write(line.replace(\"\\uFEFF\", \"\"))\n io.write(\"</root>\")\n io.seek(0)\n return io", "def beautify_xml(XML):\n # convert XML file to modifiable string to beautify it\n text_string = ET.tostring(XML, encoding='UTF-8', method='xml')\n \n # insert line breaks before end of file tag\n file_string = text_string.replace('</aardvark>', '\\n\\n</aardvark>')\n \n # insert double new line before comments to create\n # blocks for each command\n file_string = file_string.replace('<!', '\\n\\n<!')\n \n # insert new line between each set of XML tags\n file_string = file_string.replace('><', '>\\n\\t<')\n \n # remove header\n # file_string = file_string.replace('<?xml version=\\'1.0\\' encoding=\\'utf8\\'?>\\n', '') \n \n return file_string", "def read_and_pre_process_xml(file_name):\n with open(file_name) as xml_file:\n return xml_file.read().replace('\\n', '')", "def generate_from_xml(self, filename):\n\n pass", "def print_xml(self, filename):\n\n # TODO: check what happens when input is not an xml file\n # TODO: add xmldec, processing instructions and comments\n\n xml_string = u'' # TODO: use a string buffer\n offset = 0\n stack = []\n\n for char in self.text:\n\n # any tags on the stack that can be closed?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n # any new opening tags?\n for t in self.source_tags.opening_tags.get(offset,[]):\n stack.append(t)\n xml_string += \"<%s%s>\" % (t.name, t.attributes_as_string())\n\n # any of those need to be closed immediately (non-consuming tags)?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n xml_string += escape(char)\n offset += 1\n\n fh = open(filename, 'w')\n fh.write(xml_string.encode('utf-8'))", "def fix_problematic_chars(self):\n\n # Fixing all XML files\n for language, dom_path in self.export_files.items():\n # To rename original file before reading it to remove ETX chars.\n old_export_file = \"{}.old\".format(dom_path)\n # Remove if exists\n if os.path.exists(old_export_file):\n os.remove(old_export_file)\n os.rename(dom_path, old_export_file)\n\n in_file = open(old_export_file, 'rb')\n out_file = open(dom_path, 
'wb')\n # Reading file content, replacing ETX char and Vertical Tabs and writing back to output file\n out_file.write(in_file.read().replace(b'\\x03', b'').replace(b'\\x0b', b''))\n\n in_file.close()\n out_file.close()\n # Remove temp file\n os.remove(old_export_file)", "def file_name(self):\n return self.id_string + \".xml\"", "def __human_readable_parse(self):\n\n xml_file = open(self.__xml_path).read() #open de xml\n\n #Adding four whitespace by level of deepness\n xml_file = re.sub('>\\s*<cases>', '>\\n <cases>',xml_file)\n xml_file = re.sub(r'>\\s*</cases>', '>\\n </cases>',xml_file)\n xml_file = re.sub(r'>\\s*<group', '>\\n <group',xml_file)\n xml_file = re.sub(r'>\\s*</group>', '>\\n </group>',xml_file)\n xml_file = re.sub(r'>\\s*<case ', '>\\n <case ',xml_file)\n xml_file = re.sub(r'>\\s*</case>', '>\\n </case>',xml_file)\n xml_file = re.sub(r'>\\s*<susp_snippet', '>\\n <susp_snippet',xml_file)\n xml_file = re.sub(r'>\\s*<src_snippet', '>\\n <src_snippet',xml_file)\n xml_file = re.sub(r'>\\s*<annotation', '>\\n <annotation',xml_file)\n xml_file = re.sub(r'>\\s*</annotation>', '>\\n </annotation>',xml_file)\n xml_file = re.sub(r'>\\s*<phenomenon', '>\\n <phenomenon',xml_file)\n xml_file = re.sub(r'>\\s*<susp_chunk', '>\\n <susp_chunk',xml_file)\n xml_file = re.sub(r'>\\s*<src_chunk', '>\\n <src_chunk',xml_file)\n\n new_xml_file = open(self.__xml_path,'w')\n new_xml_file.write(xml_file)\n new_xml_file.close()", "def get_xml(file):\n output = \"xml code not found\"\n for line in file:\n if (\"<\" in line and \"</\" in line):\n output = line.split('\"', 1)[1] \n while (output[-1] != '\"'):\n output = output[:-1]\n output = output[:-1]\n\n output = output.replace(\"\\n\", \"\")\n output = output.replace(\"\\t\", \"\")\n output = BeautifulSoup(output).prettify() # Форматирование в читаемый вид\n\n return output", "def preparse_file_named_to_stream(name, out):\n name = os.path.abspath(name)\n dir, _ = os.path.split(name)\n cur = os.path.abspath(os.curdir)\n os.chdir(dir)\n contents = open(name).read()\n contents = handle_encoding_declaration(contents, out)\n parsed = preparse_file(contents, attached, do_time=True) \n os.chdir(cur)\n out.write(\"# -*- encoding: utf-8 -*-\\n\")\n out.write('#'*70+'\\n')\n out.write('# This file was *autogenerated* from the file %s.\\n' % name)\n out.write('#'*70+'\\n')\n out.write(parsed)", "def sanitize_xml(unsanitized):\n return re.sub(r'[^a-zA-Z0-9+_\\-/\\\\.]', '', six.ensure_str(unsanitized))", "def get_xml(file_name=None):\r\n file_path = Core.get_file_path(file_name)\r\n xml = ET.parse(file_path)\r\n return xml", "def meta2xml(meta, filename):\n\n # this is stupid, just use dict2xml\n xml = dict2xml(meta)\n with open(filename, 'w+') as output:\n output.write(xml)", "def test_utf8_xml_from_xml_file(self):\n # 'Россия' is 'Russia' in Cyrillic, not that it matters.\n xml = u\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <country>Россия</country>\"\"\"\n with tempfile.NamedTemporaryFile(suffix=\".xml\") as xmlfile:\n xmlfile.write(xml.encode('utf-8'))\n xmlfile.flush()\n\n j2k = glymur.Jp2k(self.j2kfile)\n with tempfile.NamedTemporaryFile(suffix=\".jp2\") as jfile:\n jp2 = j2k.wrap(jfile.name)\n xmlbox = glymur.jp2box.XMLBox(filename=xmlfile.name)\n jp2.append(xmlbox)\n\n box_xml = jp2.box[-1].xml.getroot()\n box_xml_str = ET.tostring(box_xml,\n encoding='utf-8').decode('utf-8')\n self.assertEqual(box_xml_str,\n u'<country>Россия</country>')", "def creer_fichier(nom_file):\n fichier = open(nom_file, 'w')\n fichier.write(\"<?xml version='1.0' 
encoding='UTF-8' standalone='yes'?>\\n\")\n fichier.close()", "def file_to_xml(cls, file_object):\r\n return etree.parse(file_object, parser=edx_xml_parser).getroot()", "def handle_illformed_xml(fp):\n\tcontents = fp.read().replace('\"t', '\" t')\n\treturn StringIO.StringIO(contents)", "def ler_arquivo_xml(self, diretorio):\r\n with open(diretorio, 'r') as fxml:\r\n\t strfx = fxml.readlines()\r\n\t string = \"\".join(strfx).replace(\"&\",\" e \")\r\n return string" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the report template
def _report_template():
    current_dir = Path(__file__).parent
    with open(current_dir / "report_template.html", "r") as f:
        template = f.read()
    template = re.sub(r"\s{2,}", " ", template)
    template = re.sub(r"\n", "", template)
    template = re.sub(r"> <", "><", template)
    return template
[ "def get_template(self):\n return self.template", "def _get_template_filename(self):\n file_name = ReportMeta.reports[self._report_key]['fileName']\n return '{}.html'.format(file_name)", "def get_template(self, context, **kwargs):\r\n return self.template", "def GetTemplate(self, _page_data):\n return self.template", "def _template(self):\n return getattr(self, self.template)()", "def get_template(self):\n # TODO: Define default template in portal\n return Template.objects.get(name=\"Article\")", "def get_html_report(self) -> str:\n template_contents = dict(\n vendor_bundle_js=self.vendor_bundle,\n app_bundle_js=self.app_bundle,\n # results\n results=self.results,\n # account metadata\n account_id=self.account_id,\n account_name=self.account_name,\n report_generated_time=str(self.report_generated_time),\n cloudsplaining_version=__version__,\n )\n template_path = os.path.dirname(__file__)\n env = Environment(loader=FileSystemLoader(template_path)) # nosec\n template = env.get_template(\"template.html\")\n return template.render(t=template_contents)", "def get_template_name(self):\n\t\treturn self.template_name", "def template_file(self) -> str:\n return jsii.get(self, \"templateFile\")", "def get_template(self):\n return '{}/{}.html'.format(self.content_object._meta.app_label, self.content_object._meta.model_name)", "def download_template(self):\n return self._pj([self.classpath, 'templates', 'D2C1_template.tsv'])", "def get_notification_template(self):\n if self.db_config_file.key_exists(\"notification_template_file\"):\n filename = self.db_config_file_value(\"notification_template_file\").strip('\"')\n return open(filename, 'rt').read()\n\n return get_data(\"asebackupcli\", \"notification.json\")", "def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path", "def getTemplate():\n\n with open('/home/sevudan/Scripts/projects/topogen/template.cfg', 'r') as file:\n data = file.read()\n file.close()\n return Template(data)", "def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)", "def document_template(self):\n return self._document_template", "def get_html_template(self):\n file_path = '{}{}.html'.format(self.folder, self.code)\n return get_template(file_path)", "def template(self):\n return Template(self.content)", "def template(self):\n data = self._get_template_data()\n if data is None:\n return None\n return template.Template(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render exception_data as an html report
def render_exception_html(exception_data, report_template=None):
    report_template = report_template or _report_template()
    jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=["jinja2.ext.autoescape"])
    exception_data["repr"] = repr
    return jinja_env.from_string(report_template).render(exception_data)
[ "def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):\n exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)\n if data_processor:\n exception_data = data_processor(exception_data)\n\n if output_format == \"html\":\n text = render_exception_html(exception_data)\n elif output_format == \"json\":\n text = render_exception_json(exception_data)\n else:\n raise TypeError(\"Exception report format not correctly specified\")\n\n filename = gen_error_filename(extension=output_format)\n\n report_location = storage_backend.write(filename, text)\n\n return report_location", "def renderHTTP_exception(request, failure):", "def exception_report(storage_backend=LocalErrorStorage(), output_format=\"html\", data_processor=None):\n\n def _exception_reports(func, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, tb = sys.exc_info()\n\n report_location = create_exception_report(exc_type, exc_value, tb, output_format, storage_backend=storage_backend, data_processor=data_processor)\n\n e = append_to_exception_message(e, tb, f\"[report:{report_location}]\")\n setattr(e, \"report\", report_location)\n\n # We want to raise the original exception:\n # 1) with a modified message containing the report location\n # 2) with the original traceback\n # 3) without it showing an extra chained exception because of this handling (`from None` accomplishes this)\n raise e from None\n\n return decorator(_exception_reports)", "def render_exception_json(exception_data):\n return json.dumps(exception_data, default=_json_serializer)", "def _render_export_compliance_error(service_name, sso_username, email, quay_username):\n\n error_info = {\n \"reason\": \"exportcomplianceerror\",\n \"service_name\": service_name,\n \"sso_username\": sso_username,\n \"email\": email,\n \"quay_username\": quay_username,\n }\n\n resp = index(\"\", error_info=error_info)\n resp.status_code = 400\n return resp", "def get_html_response(\n self,\n request: HttpRequest,\n exception: Problem,\n context: ContextDict,\n status: int,\n ) -> HttpResponse:\n return render(\n request=request,\n template_name=self.get_template_names(request, exception, context),\n context=context,\n using=getattr(settings, 'SAFESPACE_TEMPLATE_ENGINE', None),\n status=status,\n )", "def errors_as_html(error_data, error_template=None, no_default_css=False):\n if error_template is None:\n error_template = \"error_template.html\"\n template_loader = jinja2.PackageLoader(\"graderutils\", \"static\")\n else:\n template_loader = jinja2.FileSystemLoader(\"./\")\n\n env = jinja2.Environment(loader=template_loader)\n template = env.get_template(error_template)\n return template.render(errors=error_data, no_default_css=no_default_css)", "def send_exception(self, exception):\n # Import traceback\n import traceback\n\n # Set page content\n page = \"\"\"<html>\n<head>\n<title>500 - Error retrieving page</title>\n</head>\n<body>\n<h1>Exception treating request to {path}</h1>\n<p>Exception : {type}</p>\n<p>{trace}</p>\n</body>\n</html>\"\"\".format(path=self.path, type=type(exception).__name__,\n trace=traceback.format_tb(exception.__traceback__))\n\n # Convert the page content to bytes and send it\n self.send_data(page, 500)", "def generate_html_report(self):\n report = self._report_accessor.report\n hard_section = self._generate_symbol_report(report.hard_good_symbols)\n medium_section = 
self._generate_symbol_report(report.medium_good_symbols)\n\n hard_msg_symbols_html = ['%s<br>' % symbol for symbol in hard_section]\n medium_msg_symbols_html = ['%s<br>' % symbol\n for symbol in medium_section]\n msg_html = \"\"\"<h2>Stats</h2>\n <p>Date: %s<br>\n count_total = %d<br>\n count_correct_analysis = %d</p>\n <h2>Hard filtered symbols</h2><p>%s</p>\n <h2>Medium filtered symbols</h2><p>%s</p>\"\"\" % (\n report.date,\n report.count_total,\n report.count_correct_analysis,\n \"\".join(hard_msg_symbols_html),\n \"\".join(medium_msg_symbols_html))\n return msg_html", "def process_exception(self, request, exception):\n gc = GithubCredentials(\n user=settings.EXREPORTER_GITHUB_USER,\n repo=settings.EXREPORTER_GITHUB_REPO,\n auth_token=settings.EXREPORTER_GITHUB_AUTH_TOKEN)\n gs = GithubStore(credentials=gc)\n reporter = ExReporter(\n store=gs, labels=settings.EXREPORTER_GITHUB_LABELS)\n\n reporter.report()", "def technical_500_response(request, exc_type, exc_value, status_code=500):\n context = {'exc_type':str(exc_type)[1:-1],'exc_value':exc_value}\n \n with Path(CURRENT_DIR, 'templates', 'technical_500.html').open() as fh:\n content = HtmlTemplite(fh.name).render(context)\n\n return WSGIResponse(content, 'text/html', 500)", "def _get_report(self, entry):\n context = entry.get('@context', {})\n exception = entry.get('@exception', {})\n\n issue_type = entry.get('issue_type')\n message = entry.get('@message')\n details = ''\n\n # labels to add a report\n labels = ['security']\n\n assert message is not None, '@message should not be empty'\n\n if issue_type == 'CSRF':\n assert context.get('hookName') is not None, '@context.hookName should be defined'\n\n # @see https://cwe.mitre.org/data/definitions/352.html\n labels.append('CWE-352')\n details = \"\"\"\n*A [Cross-site request forgery|https://cwe.mitre.org/data/definitions/352.html] attack is possible here*!\nAn attacker can make a request on behalf of a current Wikia user.\n\nPlease refer to [documentation on Confluence|https://fandom.atlassian.net/wiki/display/SEC/Cross-Site+Request+Forgery] on how to protect your code.\n\n*Transaction*: {{{{{transaction}}}}}\n*Action performed*: {{{{{action_performed}}}}}\n*Token checked*: {token_checked}\n*HTTP method checked*: {method_checked}\n\"\"\".format(\n transaction=context.get('transaction'), # e.g. api/nirvana/CreateNewWiki\n action_performed=context.get('hookName'), # e.g. 
WikiFactoryChanged\n token_checked='checked' if context.get('editTokenChecked') is True else '*not checked*',\n method_checked='checked' if context.get('httpMethodChecked') is True else '*not checked*',\n )\n\n # format the report\n full_message = self.FULL_MESSAGE_TEMPLATE.format(\n message=message,\n details=details.strip(),\n backtrace=self._get_backtrace_from_exception(exception, offset=5) # skip backtrace to CSRFDetector\n ).strip()\n\n description = self.REPORT_TEMPLATE.format(\n env=self._get_env_from_entry(entry),\n source_host=entry.get('@source_host', 'n/a'),\n context_formatted=json.dumps(entry.get('@context', {}), indent=True),\n fields_formatted=json.dumps(entry.get('@fields', {}), indent=True),\n full_message=full_message,\n url=self._get_url_from_entry(entry) or 'n/a'\n ).strip()\n\n report = Report(\n summary=message,\n description=description,\n label=self.REPORT_LABEL\n )\n\n # add security issue specific labels\n [report.add_label(label) for label in labels]\n\n return report", "def test_get_processor_exception_html(self):\r\n for type in [CCProcessorSignatureException, CCProcessorWrongAmountException, CCProcessorDataException]:\r\n error_msg = \"An exception message of with exception type {0}\".format(str(type))\r\n exception = type(error_msg)\r\n html = get_processor_exception_html(exception)\r\n self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, html)\r\n self.assertIn('Sorry!', html)\r\n self.assertIn(error_msg, html)\r\n\r\n # test base case\r\n self.assertIn(\"EXCEPTION!\", get_processor_exception_html(CCProcessorException()))", "def formatException(self, exc_info):\n type_, value, trcbk = exc_info\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n row = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n self.writer.writerow(row)\n\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()", "def xml(self):\n strg = \"<Exception>\\n\"\n strg += \"<Object>\\n\"\n strg += \"%s\\n\" % self.name\n strg += \"</Object>\\n\"\n strg += \"<Message>\\n\"\n strg += self._message\n strg += \"</Message>\\n\"\n strg += \"<DataItems>\\n\"\n for key, value in viewitems(self.data):\n strg += \"<DataItem>\\n\"\n strg += \"<Key>\\n\"\n strg += str(key)\n strg += \"</Key>\\n\"\n strg += \"<Value>\\n\"\n strg += str(value)\n strg += \"</Value>\\n\"\n strg += \"</DataItem>\\n\"\n strg += \"</DataItems>\\n\"\n strg += \"</Exception>\\n\"\n logging.error(strg)\n return strg", "def write_error(self, status_code, **kwargs):\r\n if hasattr(self, 'get_error_html'):\r\n if 'exc_info' in kwargs:\r\n exc_info = kwargs.pop('exc_info')\r\n kwargs['exception'] = exc_info[1]\r\n try:\r\n # Put the traceback into sys.exc_info()\r\n raise_exc_info(exc_info)\r\n except Exception:\r\n self.finish(self.get_error_html(status_code, **kwargs))\r\n else:\r\n self.finish(self.get_error_html(status_code, **kwargs))\r\n return\r\n if self.settings.get(\"serve_traceback\") and \"exc_info\" in kwargs:\r\n # in debug mode, try to send a traceback\r\n self.set_header('Content-Type', 'text/plain')\r\n for line in traceback.format_exception(*kwargs[\"exc_info\"]):\r\n self.write(line)\r\n self.finish()\r\n else:\r\n self.finish(\"<html><title>%(code)d: %(message)s</title>\"\r\n \"<body>%(code)d: %(message)s</body></html>\" % {\r\n \"code\": status_code,\r\n \"message\": self._reason,\r\n })", "def __new__(self, workload, trace):\n return JINJA_ENV \\\n .get_template('workload-exception.html') \\\n 
.render(workload=workload, trace=trace)", "def get_error_report(exception, **kwargs):\r\n\r\n if hasattr(exception, \"stored_report\"):\r\n result = exception.stored_report\r\n else:\r\n if hasattr(exception, \"exc_info\"):\r\n kwargs[\"exception_info\"] = exception.exc_info\r\n\r\n result = get_traceback(**kwargs)\r\n result += \"\\n{exc.__class__.__name__}: {exc}\\n\".format(exc=exception)\r\n return result", "def test_renderer_works_correctly_with_error_detail(self):\n rendered = self.renderer.render(\n data=ErrorDetail(\"Test\", code=status.HTTP_400_BAD_REQUEST),\n media_type=\"application/json\",\n renderer_context={},\n )\n self.assertEqual(rendered.decode(), '\"Test\"')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render exception_data as a json object
def render_exception_json(exception_data):
    return json.dumps(exception_data, default=_json_serializer)
[ "def jsonify_http_exception(exception: HTTPException):\n return jsonify(exception.description, exception.code)", "def _log_and_jsonify_exception(e):\n app.logger.exception(e)\n if hasattr(e, \"json\") and e.json:\n return jsonify(**e.json), e.code\n else:\n return jsonify(message=e.message), e.code", "def ToJson(self):\n return json.dumps({\n 'error': {\n 'errors': self.Errors(),\n 'code': 400,\n 'message': self.Message(),\n },\n })", "def get_er_exceptions():\n express_route_exceptions_lst = []\n try:\n for i in get_data():\n if i['expressRoute'] is False:\n express_route_exceptions_lst.append(i)\n express_route_exceptions_dic = {'expressRoutesExceptions': express_route_exceptions_lst}\n return get_json(express_route_exceptions_dic)\n except ValueError as e:\n print(e)", "def get_json_error(self):\n return {\"errno\": self.errno, \"error\": self.message}", "def AsJson(self):\n\n return json.dumps(self._errors)", "def to_dict(self):\n exception_dict = dict(message=self.message)\n return exception_dict", "def test_renderer_works_correctly_with_error_detail(self):\n rendered = self.renderer.render(\n data=ErrorDetail(\"Test\", code=status.HTTP_400_BAD_REQUEST),\n media_type=\"application/json\",\n renderer_context={},\n )\n self.assertEqual(rendered.decode(), '\"Test\"')", "def runtime_exception():\n\n result = {\n 'message': 'Server Error (Unhandled Exception Occured): Please make sure that you have passed correct number of arguments in POST request in json format'\n }\n\n return jsonify(result), 500", "def render_exception_html(exception_data, report_template=None):\n report_template = report_template or _report_template()\n jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=[\"jinja2.ext.autoescape\"])\n exception_data[\"repr\"] = repr\n return jinja_env.from_string(report_template).render(exception_data)", "def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n exception = kwargs.get(\"exception\")\n\n context.update({\n \"exception\": exception,\n \"exception_type\": exception.__class__.__name__ if exception else None,\n \"exception_msg\": exception.message if exception and hasattr(exception, 'message') else str(exception) if exception else None,\n \"extra_message\": kwargs.get(\"extra_message\"),\n })\n return context", "def _ExceptionResponse(args_dict=None):\n if args_dict is None:\n args_dict = {}\n args_dict[\"code\"] = \"Exception\"\n return CGateway._DumpResponse(args_dict)", "def custom_exception_handler(exc):\n response = exception_handler(exc)\n\n if response is not None:\n response.data['status_code'] = response.status_code\n response.data['error'] = response.data['detail']\n del response.data['detail']\n\n return response", "def formatResponse(self, data, request):\n if data:\n rendered = json.dumps(data)\n else:\n rendered = b\"\"\n\n request.responseHeaders.addRawHeader(b\"Content-Type\", b\"application/vnd.api+json\")\n\n return rendered", "def get_json_response(\n self,\n request: HttpRequest,\n exception: Problem,\n context: ContextDict,\n status: int,\n ) -> JsonResponse:\n content = {\n 'code': context['code'],\n 'error': context['message'],\n 'title': context['title'],\n }\n return JsonResponse(content, status=status)", "def _render_fault(self, message, details, code=500):\n\n body = {\n 'identityFault': {\n \"message\": message,\n \"details\": details,\n \"code\": code\n }\n }\n return wsme.rest.json.encode_error(None, body)", "def render_diagnostics(request, diagnostics_dict, status=200):\n return 
HttpResponse(json.dumps(diagnostics_dict), status=status)", "def renderHTTP_exception(request, failure):", "def _build_event_data(self, record):\r\n\r\n logger_name = record.name if record.name else None\r\n event_data = {\r\n \"Timestamp\": _get_local_timestamp(record),\r\n \"Level\": logging.getLevelName(record.levelno),\r\n \"MessageTemplate\": record.getMessage(),\r\n \"Properties\": get_global_log_properties(logger_name)\r\n }\r\n if hasattr(record, 'args'):\r\n # Standard (unnamed) format arguments (use 0-base index as property name).\r\n log_props_shim = get_global_log_properties(record.name)\r\n\r\n for (arg_index, arg) in enumerate(record.args or []):\r\n event_data[\"Properties\"][str(arg_index)] = arg\r\n\r\n if hasattr(record, 'log_props'):\r\n # assume record is StructuredLogRecord\r\n for prop_name in record.log_props.keys():\r\n event_data[\"Properties\"][prop_name] = record.log_props[prop_name]\r\n\r\n for log_prop_name in event_data[\"Properties\"].keys():\r\n # bytes is not serialisable to JSON; encode appropriately.\r\n log_prop = event_data[\"Properties\"][log_prop_name]\r\n arg = _encode_bytes_if_required(log_prop)\r\n arg = best_effort_json_encode(arg)\r\n event_data[\"Properties\"][log_prop_name] = arg\r\n\r\n if record.exc_text:\r\n # Rendered exception has already been cached\r\n event_data[\"Exception\"] = record.exc_text\r\n elif self._support_stack_info and record.stack_info and not record.exc_info:\r\n # Feature flag is set: fall back to stack_info (sinfo) if exc_info is not present\r\n event_data[\"Exception\"] = record.stack_info\r\n elif isinstance(record.exc_info, tuple):\r\n # Exception info is present\r\n if record.exc_info[0] is None and self._support_stack_info and record.stack_info:\r\n event_data[\"Exception\"] = \"{0}--NoExeption\\n{1}\".format(logging.getLevelName(record.levelno), record.stack_info)\r\n else:\r\n event_data[\"Exception\"] = record.exc_text = self.formatter.formatException(record.exc_info)\r\n elif record.exc_info:\r\n # Exception info needs to be captured\r\n exc_info = sys.exc_info()\r\n if exc_info and exc_info[0] is not None:\r\n event_data[\"Exception\"] = record.exc_text = self.formatter.formatException(record.exc_info)\r\n\r\n return event_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context).
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
    source = None
    if loader is not None and hasattr(loader, "get_source"):
        with suppress(ImportError):
            source = loader.get_source(module_name)
        if source is not None:
            source = source.splitlines()
    if source is None:
        with suppress(OSError, IOError):
            with open(filename, "rb") as fp:
                source = fp.read().splitlines()
    if source is None:
        return None, [], None, []
    try:
        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        if isinstance(source[0], bytes):
            encoding = "ascii"
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br"coding[:=]\s*([-\w.]+)", line)
                if match:
                    encoding = match.group(1).decode("ascii")
                    break
            source = [str(sline, encoding, "replace") for sline in source]
        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines
        pre_context = source[lower_bound:lineno]
        context_line = source[lineno]
        post_context = source[lineno + 1 : upper_bound]
        return lower_bound, pre_context, context_line, post_context
    except Exception as e:
        try:
            context_line = f'<There was an error displaying the source file: "{repr(e)}" The loaded source has {len(source)} lines.>'
        except Exception:
            context_line = "<There was an error displaying the source file. Further, there was an error displaying that error>"
        return lineno, [], context_line, []
[ "def _get_lines_from_file(filename, lineno, context_lines):\r\n try:\r\n source = open(filename).readlines()\r\n lower_bound = max(0, lineno - context_lines)\r\n upper_bound = lineno + context_lines\r\n\r\n pre_context = \\\r\n [line.strip('\\n') for line in source[lower_bound:lineno]]\r\n context_line = source[lineno].strip('\\n')\r\n post_context = \\\r\n [line.strip('\\n') for line in source[lineno + 1:upper_bound]]\r\n\r\n return lower_bound, pre_context, context_line, post_context\r\n except (OSError, IOError):\r\n return None, [], None, []", "def _get_lines_from_file(filename, lineno, context_lines):\n\n try:\n source = open(filename).readlines()\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = \\\n [line.strip('\\n') for line in source[lower_bound:lineno]]\n context_line = source[lineno].strip('\\n')\n post_context = \\\n [line.strip('\\n') for line in source[lineno + 1:upper_bound]]\n\n return lower_bound, pre_context, context_line, post_context\n except (OSError, IOError):\n return None, [], None, []", "def get_code_context_around_line(filename: str, linenum: int) -> list[str]:\n if os.path.isfile(filename):\n index = linenum - 1\n with open(filename) as f:\n index_above = index - 2\n index_above = index_above if index_above >= 0 else 0\n return [x.rstrip() for x in f.readlines()[index_above : index + 1]]\n return []", "def get_context(error, source):\n context = []\n source.seek(0)\n # number of lines to use as context\n n_lines = 2\n # range to get\n lower_bound = error.line - n_lines\n upper_bound = error.line\n\n for count, line in enumerate(source):\n # get lines that are within context range\n if count + 1 >= lower_bound:\n fmt_line = \"| \".join([str(count + 1), line.strip()])\n context.append(fmt_line)\n if count + 1 == upper_bound:\n source.close()\n break\n return context", "def line_offsets(fname):\n line_offset = []\n offset = 0\n for _, line in enumerate( open(fname) ):\n line_offset.append(offset)\n offset += len(line)\n return line_offset", "def guess_lineno(file):\n offset = file.tell()\n file.seek(0)\n startpos = 0\n lineno = 1\n # looks like file.read() return bytes in python3\n # so I need more complicated algorithm here\n while True:\n line = file.readline()\n if not line:\n break\n endpos = file.tell()\n if startpos <= offset < endpos:\n break\n lineno += 1\n file.seek(offset)\n return lineno", "def line_pos(self) -> int:\n return self.source_position()[1]", "def filelineno():\n if not _state:\n raise RuntimeError(\"no active input()\")\n return _state.filelineno()", "def source_position(self) -> Tuple[int, int]:\n return self.templated_file.get_line_pos_of_char_pos(\n self.source_slice.start, source=True\n )", "def line_numbers(self) -> List[int]:\n\n if self.begin_lineno is None:\n return []\n\n elif self.end_lineno is None:\n return [self.begin_lineno]\n\n return list(range(self.begin_lineno, self.end_lineno + 1))", "def _diffContext(diff, n=3):\n nlines = len(diff)\n clines = set() # set of lines to include\n for i, line in enumerate(diff):\n if line[0] != ' ':\n clines |= set(range(max(0, i-n), min(i+n+1, nlines)))\n context = []\n clines = list(clines)\n clines.sort()\n last = -1\n for i in clines:\n if i != last+1:\n context.append(\" ...\\n\")\n context.append((\"%4d: \"%i) + diff[i])\n last = i\n if clines[-1] != nlines-1:\n context.append(\" ...\\n\")\n return context", "def getpos(self):\n return self.lineno, self.offset", "def findlinestarts(code):\n byte_increments = [ord(c) for c in 
code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n result = []\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n result.append((addr, lineno))\n lastlineno = lineno\n addr += byte_incr\n lineno += line_incr\n if lineno != lastlineno:\n result.append((addr, lineno))\n return result", "def getlineno(frame):\r\n # FrameType.f_lineno is now a descriptor that grovels co_lnotab\r\n return frame.f_lineno", "def _ecl_get_lineno(self, frame=None):\n try:\n f = _ecl_interpreted_frame(frame)\n map = f.f_locals[\"_ecl\"]._linemap\n return map[f.f_lineno]\n except:\n return 0", "def findlinestarts(code):\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n yield (addr, lineno)\n lastlineno = lineno\n addr += byte_incr\n if line_incr >= 0x80:\n # line_increments is an array of 8-bit signed integers\n line_incr -= 0x100\n lineno += line_incr\n if lineno != lastlineno:\n yield (addr, lineno)", "def get_line_map(self, filename):\n if filename not in self.source_map:\n template_source = read_template_source(filename)\n if SHOW_TRACING: # change to see the template text\n for i in range(0, len(template_source), 10):\n print_log(\"%3d: %r\" % (i, template_source[i:i+10]))\n self.source_map[filename] = make_line_map(template_source)\n return self.source_map[filename]", "def _get_file_and_line():\n code, f = _get_caller()\n if not code:\n return '<unknown>', 0\n return code.co_filename, f.f_lineno", "def get_previous_line_number(self):\n if len(self.token_data) > 0 and self.pos > 1:\n return self.token_data[self.pos-2][2]\n return -1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an exception report and return its location
def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):
    exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)
    if data_processor:
        exception_data = data_processor(exception_data)

    if output_format == "html":
        text = render_exception_html(exception_data)
    elif output_format == "json":
        text = render_exception_json(exception_data)
    else:
        raise TypeError("Exception report format not correctly specified")

    filename = gen_error_filename(extension=output_format)

    report_location = storage_backend.write(filename, text)

    return report_location
[ "def get_error_report(exception, **kwargs):\r\n\r\n if hasattr(exception, \"stored_report\"):\r\n result = exception.stored_report\r\n else:\r\n if hasattr(exception, \"exc_info\"):\r\n kwargs[\"exception_info\"] = exception.exc_info\r\n\r\n result = get_traceback(**kwargs)\r\n result += \"\\n{exc.__class__.__name__}: {exc}\\n\".format(exc=exception)\r\n return result", "def report_exception(self, filename, exc):\n return EventReport(filename, events=[Error(str(exc))])", "def create_exception(self, msg: str):", "def formatReport(cls, instance, trcback, context=1):\n\n\theader = []\n\theader.append(\"Exception in '{0}'.\".format(getInnerMostFrame(trcback).f_code.co_name))\n\theader.append(\"Exception class: '{0}'.\".format(cls.__name__))\n\theader.append(\"Exception description: '{0}'.\".format(instance.__doc__ and instance.__doc__.strip() or \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tConstants.nullObject))\n\tfor i, line in enumerate(str(instance).split(\"\\n\")):\n\t\theader.append(\"Exception message line no. '{0}' : '{1}'.\".format(i + 1, line))\n\n\tframes = []\n\tfor frame, locals in extractLocals(trcback):\n\t\tframes.append(\"Frame '{0}' in '{1}' at line '{2}':\".format(*frame))\n\t\targuments, namelessArgs, keywordArgs, locals = locals\n\t\tany((arguments, namelessArgs, keywordArgs)) and frames.append(\"{0:>40}\".format(\"Arguments:\"))\n\t\tfor key, value in arguments.iteritems():\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tfor value in namelessArgs:\n\t\t\tframes.append(\"{0:>40}\".format(value))\n\t\tfor key, value in sorted(keywordArgs.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tlocals and frames.append(\"{0:>40}\".format(\"Locals:\"))\n\t\tfor key, value in sorted(locals.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tframes.append(str())\n\n\ttrcback = formatException(cls, instance, trcback)\n\n\treturn header, frames, trcback", "def process_exception(self, request, exception):\n gc = GithubCredentials(\n user=settings.EXREPORTER_GITHUB_USER,\n repo=settings.EXREPORTER_GITHUB_REPO,\n auth_token=settings.EXREPORTER_GITHUB_AUTH_TOKEN)\n gs = GithubStore(credentials=gc)\n reporter = ExReporter(\n store=gs, labels=settings.EXREPORTER_GITHUB_LABELS)\n\n reporter.report()", "def __init__(self):\r\n try:\r\n self.file = open(REPORT_FILE, 'w')\r\n except OSError:\r\n print('Problem opening log file')\r\n exit(1)", "def exception_report(storage_backend=LocalErrorStorage(), output_format=\"html\", data_processor=None):\n\n def _exception_reports(func, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, tb = sys.exc_info()\n\n report_location = create_exception_report(exc_type, exc_value, tb, output_format, storage_backend=storage_backend, data_processor=data_processor)\n\n e = append_to_exception_message(e, tb, f\"[report:{report_location}]\")\n setattr(e, \"report\", report_location)\n\n # We want to raise the original exception:\n # 1) with a modified message containing the report location\n # 2) with the original traceback\n # 3) without it showing an extra chained exception because of this handling (`from None` accomplishes this)\n raise e from None\n\n return decorator(_exception_reports)", "def error_report(exc_info, program=None, output=\"\", email=None, files=[], listdirs=[], savedir=\"/tmp\", title=\"AUTOMATICALLY GENERATED ERROR REPORT\"):\n # Note: I intentionally do not check types up front. This routine must\n # continue no matter what. 
It does the best it can in the face \n # of errors.\n \n # collect information...\n # date and time\n try:\n now = time.localtime()\n except:\n now = (1980, 1, 1, 0, 0, 0, 0, 1, 0)\n try:\n time_str = time.strftime(\"%a, %d %b %Y %H:%M:%S %Z\", now)\n except:\n time_str = \"\"\n \n # stack trace\n try:\n trace = \"\".join(traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]))\n except:\n trace = \"Unable to process stack trace.\\n\" \n\n # load averages\n try:\n load_avg = str(os.getloadavg())\n except:\n load_avg = \"Unable to determine load averages.\" \n \n # process listing \n try:\n tmp = os.popen(\"ps -wweF\")\n ps = \"\".join(tmp.readlines())\n tmp.close()\n except:\n ps = \"Unable to retrieve process listing.\\n\" \n \n # memory info \n try:\n tmp_in, tmp_out, tmp_err = os.popen3(\"free -t\")\n tmp_in.close()\n tmp_str = \"\".join(tmp_err.readlines()).strip()\n tmp_err.close()\n if tmp_str != \"\":\n memory = \"Unable to retrieve memory information.\\n\" \n else:\n memory = \"\".join(tmp_out.readlines())\n tmp_out.close()\n except:\n memory = \"Unable to retrieve memory information.\\n\" \n \n # disk info \n try:\n tmp_in, tmp_out, tmp_err = os.popen3(\"df -ah\")\n tmp_in.close()\n tmp_str = \"\".join(tmp_err.readlines()).strip()\n tmp_err.close()\n if tmp_str != \"\":\n disk = \"Unable to retrieve disk information.\\n\" \n else:\n disk = \"\".join(tmp_out.readlines())\n tmp_out.close()\n except:\n disk = \"Unable to retrieve disk information.\\n\" \n \n # runtime environment \n uname = \" \".join(os.uname())\n cwd = os.getcwd()\n path = os.getenv(\"PATH\", \"Unable to determine path.\")\n username = getusername()\n hostname = socket.gethostname()\n cmdline = \" \".join(sys.argv[0:])\n \n # file contents\n file_contents = \"\"\n try:\n iter(files)\n except:\n files = []\n for filename in files:\n if not isinstance(filename, str):\n continue\n file_contents += \"\\n\" + filename + \"\\n\" + (\"-\" * len(filename)) + \"\\n\"\n try:\n tmp = file(filename, \"r\")\n file_contents += \"\".join(tmp.readlines()) + \"\\n\"\n tmp.close()\n except:\n file_contents += \"Unable to read file.\\n\\n\" \n \n # directory listings\n directory_list = \"\"\n try:\n iter(listdirs)\n except:\n listdirs = []\n for directory in listdirs:\n if not isinstance(directory, str):\n continue\n try:\n tmp_out = os.popen(\"ls -AlFRq \" + directory + \" 2>&1\")\n directory_list += \"\".join(tmp_out.readlines()) + \"\\n\"\n tmp_out.close()\n except:\n directory_list += \"Unable to read directory tree: \" + directory + \"\\n\\n\" \n\n # output\n if not isinstance(output, str):\n output = \"No output information given.\\n\"\n elif not output.endswith(\"\\n\"):\n output += \"\\n\" \n \n # title\n if not isinstance(title, str):\n title = \"AUTOMATICALLY GENERATED ERROR REPORT\"\n \n # program\n if not isinstance(program, str):\n try:\n # try to determine the name of the program \n program = os.path.basename(sys.argv[0])\n if program == \"\":\n # running from the python shell\n program = \"python\"\n except:\n program = \"UNKNOWN\" \n \n # build error report\n report = title + \"\\n\\n\\n\" + \\\n \"Stack trace\\n===========\\n\" + trace + \"\\n\" + \\\n \"Time\\n====\\n\" + time_str + \"\\n\\n\" + \\\n \"Program reporting the error\\n===========================\\n\" + program + \"\\n\\n\" + \\\n \"Command line\\n============\\n\" + cmdline + \"\\n\\n\" + \\\n \"Program output\\n==============\\n\" + output + \"\\n\" + \\\n \"Current directory\\n=================\\n\" + cwd + \"\\n\\n\" + \\\n 
\"System\\n======\\n[\" + username + \"@\" + hostname + \"] \" + uname + \"\\n\\n\" + \\\n \"Path\\n====\\n\" + path + \"\\n\\n\" + \\\n \"Memory usage\\n============\\n\" + memory + \"\\n\" + \\\n \"Load averages (1, 5, 15 min)\\n============================\\n\" + load_avg + \"\\n\\n\" + \\\n \"Process listing\\n===============\\n\" + ps + \"\\n\" + \\\n \"Disk usage\\n==========\\n\" + disk + \"\\n\" \n if len(listdirs) > 0:\n report += \"Directory listings\\n==================\\n\" + directory_list \n \n if len(files) > 0: \n report += \"File contents\\n=============\\n\" + file_contents\n \n # write to a file \n saved = None\n if savedir != None:\n if not isinstance(savedir, str):\n savedir = \"/tmp\"\n try:\n os.makedirs(savedir)\n except:\n pass \n filename = os.path.join(savedir, program + \".error.\" + time.strftime(\"%d%b%Y_%H.%M.%S_%Z\", now))\n try:\n outfile = file(filename, \"w\")\n outfile.write(report)\n outfile.close()\n saved = filename \n except:\n pass\n \n # send mail \n mailed = False \n if email != None and username != \"UNKNOWN USERNAME\":\n try:\n iter(email)\n except:\n email = []\n for recipient in email:\n if not isinstance(recipient, str):\n continue\n \n # TODO FIXME smtplib and sendmail may report that they sent the \n # mail successfully, even when they didn't. This happens, for\n # example, if mail is set up for local delivery only. In this\n # situation the mail programs do not give any indication of \n # error. To fix this, we will need to investigate the mail \n # system ourselves, or have some sort of response sent back to\n # the client. \n\n # try smtplib first \n try:\n server = smtplib.SMTP(\"localhost\")\n server.sendmail(username + \"@\" + hostname, recipient, \"Subject: \" + title + \"\\n\\n\" + report)\n server.quit()\n mailed = True\n except:\n pass \n\n # if smtplib failed, try sendmail\n if not mailed:\n # make a list of default places to look\n find_dirs = [\"/usr/sbin\", \"/usr/local/sbin\", \"/sbin\", \"/usr/bin\", \"/usr/local/bin\", \"/bin\"]\n \n # additionally, try other places in the path\n try:\n find_dirs = uniq(find_dirs + os.environ.get(\"PATH\").split(os.pathsep))\n except:\n pass \n \n # try finding sendmail in each of the above directories \n for sendmail in find_dirs:\n sendmail = os.path.join(sendmail, \"sendmail\")\n if os.path.isfile(sendmail):\n try:\n p = os.popen(sendmail + \" -t\", \"w\")\n p.write(\"To: \" + recipient + \"\\n\")\n p.write(\"Subject: \" + title + \"\\n\")\n p.write(\"\\n\") \n p.write(report)\n status = p.close()\n if status == None or status == 0:\n mailed = True\n except:\n pass\n \n # mail was sent, exit the for loop \n break\n \n # return results \n return mailed, saved, report", "def create_report_file(self, report_definition):\n try:\n return ReportFile.objects.request(report_definition=report_definition,\n client_customer_id=self.account_id)\n except RateExceededError as exc:\n logger.info(\"Caught RateExceededError for account '%s' - retrying in '%s' seconds.\", self.pk, exc.retry_after_seconds)\n raise self.get_account_data.retry(exc, countdown=exc.retry_after_seconds)\n except GoogleAdsError as exc:\n raise InterceptedGoogleAdsError(exc, account_id=self.account_id)", "def getErrorReport(self):\n return self.sError;", "def __create_failure_report(self, classname, failure_desc):\n match = FAILURE_LOC_RE.match(failure_desc[0])\n if not match:\n raise ValueError(\"Unexpected failure description format.\\n\"\n \"Expected the first line to contain details \"\n \"of the location of the error.\\n\"\n 
\"Found '%s'\" % failure_desc[0])\n name = match.group(3)\n return TestCaseReport(classname, name, \"\\n\".join(failure_desc))", "def ReportError(text):\n raise IOError(text)", "def get_report_path_hash(report) -> str:\n report_path_hash = ''\n events = [i for i in report.bug_path if i.get('kind') == 'event']\n for event in events:\n file_name = \\\n os.path.basename(report.files.get(event['location']['file']))\n line = str(event['location']['line'] if 'location' in event else 0)\n col = str(event['location']['col'] if 'location' in event else 0)\n\n report_path_hash += line + '|' + col + '|' + event['message'] + \\\n file_name\n\n report_path_hash += report.check_name\n\n if not report_path_hash:\n LOG.error('Failed to generate report path hash!')\n LOG.error(report.bug_path)\n\n LOG.debug(report_path_hash)\n return __str_to_hash(report_path_hash)", "def get_report_path(self):\n report_path = os.path.join(logPath, \"report.html\")\n return report_path", "def _get_report(self, entry):\n context = entry.get('@context', {})\n exception = entry.get('@exception', {})\n\n issue_type = entry.get('issue_type')\n message = entry.get('@message')\n details = ''\n\n # labels to add a report\n labels = ['security']\n\n assert message is not None, '@message should not be empty'\n\n if issue_type == 'CSRF':\n assert context.get('hookName') is not None, '@context.hookName should be defined'\n\n # @see https://cwe.mitre.org/data/definitions/352.html\n labels.append('CWE-352')\n details = \"\"\"\n*A [Cross-site request forgery|https://cwe.mitre.org/data/definitions/352.html] attack is possible here*!\nAn attacker can make a request on behalf of a current Wikia user.\n\nPlease refer to [documentation on Confluence|https://fandom.atlassian.net/wiki/display/SEC/Cross-Site+Request+Forgery] on how to protect your code.\n\n*Transaction*: {{{{{transaction}}}}}\n*Action performed*: {{{{{action_performed}}}}}\n*Token checked*: {token_checked}\n*HTTP method checked*: {method_checked}\n\"\"\".format(\n transaction=context.get('transaction'), # e.g. api/nirvana/CreateNewWiki\n action_performed=context.get('hookName'), # e.g. 
WikiFactoryChanged\n token_checked='checked' if context.get('editTokenChecked') is True else '*not checked*',\n method_checked='checked' if context.get('httpMethodChecked') is True else '*not checked*',\n )\n\n # format the report\n full_message = self.FULL_MESSAGE_TEMPLATE.format(\n message=message,\n details=details.strip(),\n backtrace=self._get_backtrace_from_exception(exception, offset=5) # skip backtrace to CSRFDetector\n ).strip()\n\n description = self.REPORT_TEMPLATE.format(\n env=self._get_env_from_entry(entry),\n source_host=entry.get('@source_host', 'n/a'),\n context_formatted=json.dumps(entry.get('@context', {}), indent=True),\n fields_formatted=json.dumps(entry.get('@fields', {}), indent=True),\n full_message=full_message,\n url=self._get_url_from_entry(entry) or 'n/a'\n ).strip()\n\n report = Report(\n summary=message,\n description=description,\n label=self.REPORT_LABEL\n )\n\n # add security issue specific labels\n [report.add_label(label) for label in labels]\n\n return report", "def pytest_runtest_makereport(item, call):\n if \"incremental\" in item.keywords:\n if call.excinfo is not None:\n parent = item.parent\n parent._previousfailed = item", "def __init__(self, filename, message, lineno=None, addenda=None, **kwargs):\n Exception.__init__(self, **kwargs)\n self.filename = filename\n self.message = message\n self.lineno = lineno\n self.addenda = addenda", "def create_exception(self, msg):\n return Exception(msg)", "def save_exception(exc):\n LOG.error(\"Error - %s\", str(exc))\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n data = (str(exc)+traceback.format_exc())\n\n file = open(\"./logs/ERROR_\"+threading.currentThread().getName()+today+\".log\",'a+') #Replace to fix OSError\n file.write(\"\\n==\"+hour+\"==\\n\")\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publish a registration to the core, listing the API commands.
def register_to_core(self):
    self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))
[ "def register_routes(self, api):\n # Device Registration\n api.add_resource(controllers.UserDeviceRegistration, '/device-registration')", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def auto_discover():\n auto_registration(\"actions\")", "def register(self):\n\n discovery_subscription = (\n \"topic=ftrack.action.discover and source.user.username={0}\"\n ).format(self.session.api_user)\n\n self.session.event_hub.subscribe(\n discovery_subscription,\n self._discover,\n priority=self.priority\n )\n\n launch_subscription = (\n \"topic=ftrack.action.launch\"\n \" and data.actionIdentifier={0}\"\n \" and source.user.username={1}\"\n ).format(\n self.identifier,\n self.session.api_user\n )\n self.session.event_hub.subscribe(\n launch_subscription,\n self._launch\n )", "def register(self):\n pass", "def register_commands(self):\n # Register the public commands\n say_command_manager.register_commands(\n (self.name, '!' + self.name),\n _send_command_menu,\n )\n\n # Register the private command\n say_command_manager.register_commands(\n '/' + self.name,\n _send_command_menu,\n )\n\n # Register the client command\n client_command_manager.register_commands(\n self.name,\n _send_command_menu,\n )", "def deployer_register(registry):\n registry.register_plugin('command', Command)\n registry.register_plugin('continue', Continue)\n registry.register_plugin('echo', Echo)\n registry.register_plugin('env', Env)\n registry.register_plugin('fail', Fail)\n registry.register_plugin('matrix', Matrix)\n registry.register_plugin('set', Set)\n registry.register_plugin('shell', Shell)\n registry.register_plugin('stage', Stage)", "def registered(self):\n log.info(\"Registered.\")\n pass", "def register(self, command, description, function, params=[]):\n return self.app.commands.register(command, description, function, params, self.plugin)", "def register_resources(api):\n api.add_resource(discourse.AllEntitiesSentimentAndCountsService,\n '/Wiki/<string:wiki_id>/Entities/All/SentimentAndCounts')\n api.add_resource(discourse.entities.WikiEntitiesService,\n '/Wiki/<string:wiki_id>/Entities/Wikia/Counts')\n api.add_resource(discourse.entities.WpWikiEntitiesService,\n '/Wiki/<string:wiki_id>/Entities/Wikipedia/Counts')\n api.add_resource(discourse.entities.CombinedWikiEntitiesService,\n '/Wiki/<string:wiki_id>/Entities/All/Counts')\n api.add_resource(discourse.entities.TopEntitiesService,\n '/Wiki/<string:wiki_id>/Entities/Wikia/Top')\n api.add_resource(discourse.entities.WpTopEntitiesService,\n '/Wiki/<string:wiki_id>/Entities/Wikipedia/Top')\n api.add_resource(discourse.entities.CombinedTopEntitiesService,\n '/Wiki/<string:wiki_id>/Entities/All/Top')\n api.add_resource(discourse.entities.WikiPageEntitiesService,\n '/Wiki/<string:wiki_id>/Pages/Entities/Wikia/Counts')\n api.add_resource(discourse.entities.WpWikiPageEntitiesService,\n '/Wiki/<string:wiki_id>/Pages/Entities/Wikipedia/Counts')\n api.add_resource(discourse.entities.CombinedWikiPageEntitiesService,\n '/Wiki/<string:wiki_id>/Pages/Entities/All/Counts')\n api.add_resource(discourse.entities.WikiPageToEntitiesService,\n '/Wiki/<string:wiki_id>/Pages/Entities/Wikia')\n api.add_resource(discourse.entities.WpPageToEntitiesService,\n '/Wiki/<string:wiki_id>/Pages/Entities/Wikipedia')\n api.add_resource(discourse.entities.CombinedPageToEntitiesService,\n '/Wiki/<string:wiki_id>/Pages/Entities/All')\n api.add_resource(syntax.WikiToPageHeadsService,\n '/Wiki/<string:wiki_id>/Pages/Heads')\n api.add_resource(syntax.HeadsCountService,\n 
'/Wiki/<string:wiki_id>/Heads/Counts')\n api.add_resource(syntax.TopHeadsService,\n '/Wiki/<string:wiki_id>/Heads/Top')\n api.add_resource(discourse.entities.EntityDocumentCountsService,\n '/Wiki/<string:wiki_id>/Pages/Entities/Wikia/DocumentCounts')\n api.add_resource(discourse.entities.WpEntityDocumentCountsService,\n '/Wiki/<string:wiki_id>/Pages/Entities/Wikipedia/DocumentCounts')\n api.add_resource(discourse.entities.CombinedDocumentEntityCountsService,\n '/Wiki/<string:wiki_id>/Pages/Entities/All/DocumentCounts')\n api.add_resource(discourse.entities.EntitiesService,\n '/Doc/<string:doc_id>/Entities/Wikia')\n api.add_resource(discourse.entities.WpEntitiesService,\n '/Doc/<string:doc_id>/Entities/Wikipedia')\n api.add_resource(discourse.entities.CombinedEntitiesService,\n '/Doc/<string:doc_id>/Entities/All')\n api.add_resource(discourse.entities.EntityCountsService,\n '/Doc/<string:doc_id>/Entities/Wikia/Counts')\n api.add_resource(discourse.entities.WpEntityCountsService,\n '/Doc/<string:doc_id>/Entities/Wikipedia/Counts')\n api.add_resource(discourse.entities.CombinedEntityCountsService,\n '/Doc/<string:doc_id>/Entities/All/Counts')\n api.add_resource(discourse.sentiment.DocumentSentimentService,\n '/Doc/<string:doc_id>/Sentiment')\n api.add_resource(discourse.sentiment.DocumentEntitySentimentService,\n '/Doc/<string:doc_id>/Entities/All/Sentiment')\n api.add_resource(discourse.sentiment.WpDocumentEntitySentimentService,\n '/Doc/<string:doc_id>/Entities/Wikipedia/Sentiment')\n api.add_resource(discourse.sentiment.WikiaDocumentEntitySentimentService,\n '/Doc/<string:doc_id>/Entities/Wikia/Sentiment')\n api.add_resource(document_access.ParsedXmlService,\n '/Doc/<string:doc_id>/XML')\n api.add_resource(syntax.AllNounPhrasesService,\n '/Doc/<string:doc_id>/NPs')\n api.add_resource(syntax.AllVerbPhrasesService,\n '/Doc/<string:doc_id>/VPs')\n api.add_resource(syntax.HeadsService,\n '/Doc/<string:doc_id>/Heads')\n api.add_resource(discourse.entities.CoreferenceCountsService,\n '/Doc/<string:doc_id>/CorefererenceCounts')", "def register_routes(self, api):\n api.add_resource(controllers.UserRegistration, '/registration')\n api.add_resource(controllers.UserLogin, '/login')\n api.add_resource(controllers.TokenRefresh, '/token/refresh')", "def _register_cli(self) -> None:\n api_cli = APICli()\n api_cli.coresys = self.coresys\n\n self.webapp.add_routes(\n [\n web.get(\"/cli/info\", api_cli.info),\n web.get(\"/cli/stats\", api_cli.stats),\n web.post(\"/cli/update\", api_cli.update),\n ]\n )", "async def _register_command(self) -> JSON:\n loop = asyncio.get_event_loop()\n async with aiohttp.ClientSession() as session:\n async with session.post(\n url=InteractionRoute().application(self._application_id).commands(self._id).url,\n json=self._data\n ) as response:\n interaction: JSON = await response.json(encoding='utf-8')\n return interaction", "async def publish_command(self, it: List[dict] = None):\n return await SlashCommand.create_global_commands(\n self.bot.user.id, self.bot.http.token, *it or self._publish\n )", "def register(self):\n self._register_dockyard()\n self._register_docker()", "def info():\n click.secho(\"Registered Drift deployable plugins:\")\n\n # setuptools distribution object:\n # http://setuptools.readthedocs.io/en/latest/pkg_resources.html#distribution-objects\n # 'activate', 'as_requirement', 'check_version_conflict', 'clone', 'egg_name', 'extras',\n # 'from_filename', 'from_location', 'get_entry_info', 'get_entry_map', 'has_version',\n # 'hashcmp', 'insert_on', 'key', 
'load_entry_point', 'location', 'parsed_version',\n # 'platform', 'precedence', 'project_name', 'py_version', 'requires', 'version'\n\n # setuptools entry point object:\n # http://setuptools.readthedocs.io/en/latest/pkg_resources.html#entrypoint-objects\n # 'attrs', 'dist', 'extras', 'load', 'module_name', 'name', 'parse', 'parse_group',\n # 'parse_map', 'pattern', 'require', 'resolve'\n\n ts = get_default_drift_config()\n click.echo(\"List of Drift deployable plugins in \", nl=False)\n _header(ts)\n deployables = ts.get_table('deployable-names')\n\n click.secho(\"Deployables and api routes registered in config:\\n\", bold=True)\n\n def join_tables(master_table, *tables, **search_criteria):\n \"\"\"\n Joins rows from 'tables' to the rows of 'master_table' and returns them\n as a single sequence.\n 'search_criteria' is applied to the 'master_table'.\n \"\"\"\n rows = master_table.find(search_criteria)\n for row in rows:\n row = row.copy()\n for table in tables:\n other = table.get(row)\n if other:\n row.update(other)\n yield row\n\n\n tabulate(\n ['deployable_name', 'api', 'requires_api_key', 'display_name', 'tags'],\n list(join_tables(deployables, ts.get_table('routing'), ts.get_table('deployable-names'))),\n indent=' ',\n )\n registered = [d['deployable_name'] for d in deployables.find()]\n\n click.secho(\"\\nDeployables registered as plugins on this machine:\\n\", bold=True)\n for d in _enumerate_plugins('drift.plugin', 'register_deployable'):\n dist, meta, classifiers, tags = d['dist'], d['meta'], d['classifiers'], d['tags']\n click.secho(dist.key, bold=True, nl=False)\n entry = deployables.get({'deployable_name': dist.key})\n if entry:\n click.secho(\"\")\n else:\n click.secho(\" (Plugin NOT registered in config DB!)\", fg='red')\n\n if dist.key in registered:\n registered.remove(dist.key)\n\n assigned = ts.get_table('deployables').find({'deployable_name': dist.key})\n if assigned:\n click.secho(\"\\tTier assignment:\")\n for assignment in assigned:\n if 'version' in assignment:\n click.secho(\"\\t\\t{tier_name} [{version}]\".format(**assignment), nl=False)\n else:\n click.secho(\"\\t\\t{tier_name}\".format(**assignment), nl=False)\n if assignment['is_active']:\n click.secho(\"\")\n else:\n click.secho(\" [inactive]\", fg='white')\n\n click.secho(\"\\tTags: {}\".format(', '.join(tags)))\n click.secho(\"\\tVersion: {}\".format(dist.parsed_version))\n\n if meta:\n for key in ['Author', 'Summary']:\n if key in meta:\n click.secho(\"\\t{}:{}\".format(key, meta[key]))\n for classifier in classifiers:\n if 'Programming Language' in classifier and classifier.count('::') == 1:\n click.secho(\"\\t{}\".format(classifier))\n else:\n click.secho(\"\\t(meta info missing)\")\n click.secho(\"\")\n\n if registered:\n click.secho(\"Note! 
The following deployables are registered in the config, but are not \"\n \"registered as plugins on this machine:\\n{}\".format(', '.join(registered)))\n\n click.secho(\"\\nDeployables assigned to tiers:\\n\", bold=True)\n ta = {}\n for d in ts.get_table('deployables').find():\n ta.setdefault(d['tier_name'], []).append(d)\n for tier_name, dep in ta.items():\n click.secho(\"{}:\".format(tier_name), bold=True)\n for d in dep:\n click.secho(d['deployable_name'], fg='black' if d['is_active'] else 'red', nl=False)\n click.secho(\" \", nl=False)\n click.secho(\"\\n\")", "def register_commands(self):\n\n with open(self._full_register_name, 'r') as file_to_read:\n command_register = json.loads(file_to_read.read())\n\n commands = command_register.get(\"commands\")\n if commands is None:\n logging.error(\"Command register is incorrect\")\n return []\n\n command_objects = []\n\n for command in commands:\n module_name = command.get(\"module\")\n class_name = command.get(\"class_name\")\n\n if (module_name is None) or (class_name is None):\n logging.error(\"Commands in the register are described in incorrect way.\")\n raise KeyError()\n\n try:\n command_module = importlib.import_module(module_name)\n command_class = getattr(command_module, class_name)\n command_object = command_class()\n command_objects.append(command_object)\n except ModuleNotFoundError as e:\n logging.error(\"Command modules specified in the register are not found!\")\n raise e\n\n return command_objects", "def register_cli(cls):\n for cmd in cls.SUB_GROUP_COMMANDS:\n getattr(cls, cls.SUB_GROUP_NAME).add_command(getattr(cls, cmd))", "def test_get_apiregistration_v1_api_resources(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subscribe to the queue matching the instance's name. Pass the command to the process_command function.
def subscribe_to_commands(self):
    self.basic_consume(self.process_command, queue=self.name)
[ "def queue_command(self, task_instance, command, priority=1, queue=None):\n super(MesosExecutor, self).queue_command(\n task_instance, command, priority, queue)\n\n key = task_instance.key\n if key not in self.queued_task_instances and key not in self.running:\n self.queued_task_instances[key] = task_instance", "def send_command(self, command):\n self.queue.put(command)", "def add_command(self, cmd):\n self.command_queue.put(cmd)", "def Enqueue(self, command):\n\n self.queue.put(command)", "def command(self, command_string):\n self.__command_queue.append(command_string)", "def execute(self, command: str):\n self._command_queue.put_nowait(command)", "def send_command(self, command):\n if self.is_iina_start:\n print(\"put command to queue: {}\".format(command))\n self.commond_queue.put(command)", "def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)", "def enqueue(self, cmd) -> None:\n self.general_queue.append(cmd)", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "def addCommand(self, command):\n self.cmdQueue.append(command)\n self.sema.release()", "def enqueue(self, command):\n\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n q = []\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if command not in q:\n q.append(command)\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()", "def on_queued_command(self, event, index=None):\n self.pre_check(event)\n if not self.get_player(event.guild.id).queue:\n api_loop(\n event.channel.send_message,\n \"There aren't any songs queued right now.\",\n )\n elif index is None:\n api_loop(\n event.channel.send_message,\n \"There are {} songs queued ({} minutes). 
To get a specific song's info, just do this command + index.\".format(\n len(self.get_player(event.guild.id).queue),\n self.minutes_format(self.get_player(\n event.guild.id,\n ).queue_length),\n ),\n )\n elif (index.replace(\"-\", \"\").strip(\" \").isdigit() and\n 0 <= (int(index.replace(\"-\", \"\").strip(\" \")) - 1) <=\n len(self.get_player(event.guild.id).queue)):\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[\n int(index.replace(\"-\", \"\").strip(\" \")) - 1\n ].metadata,\n )\n api_loop(\n event.channel.send_message,\n \"The song at index ``{}`` is ``{}`` by ``{}`` with length ``{}`` minutes and is sourced from ``{}``.\".format(\n int(index.replace(\"-\", \"\").strip(\" \")),\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )\n elif index.replace(\"-\", \"\").isdigit():\n api_loop(event.channel.send_message, \"Invalid index input.\")\n else:\n matched_list = dict()\n for item in self.get_player(event.guild.id).queue:\n ratio = partial_ratio(item.metadata[\"title\"], index)\n if ratio >= 70:\n matched_list[\"#{} ({}% match)\".format(\n self.get_player(event.guild.id).queue.index(item)+1,\n ratio,\n )] = item.metadata[\"title\"]\n if matched_list:\n embed = bot.generic_embed_values(\n title=\"Queue search results\",\n footer_text=\"Requested by {}\".format(event.author),\n non_inlines={\n k: matched_list[k] for k in list(matched_list)[-25:]\n },\n footer_img=event.author.get_avatar_url(size=32),\n timestamp=event.msg.timestamp.isoformat(),\n )\n api_loop(event.channel.send_message, embed=embed)\n else:\n api_loop(\n event.channel.send_message,\n \"No similar items found in queue.\",\n )", "def icpw_register_command_queue(self, queue):\n\n with self._command_queue_lock:\n if self._command_queue is not None:\n raise RuntimeError('Command queue may not be set more than once in a node')\n\n self._command_queue = queue\n\n for item in self._command_queue_buffer:\n self._command_queue.put(item)\n\n self._command_queue_buffer = None\n\n for _name, timer in iter_timer_objects(self):\n item = ScheduleQueueItem(timer.bind(self), repeat_sec=timer.seconds)\n self.icpw_enqueue_command(item)", "def incoming_message(self, message):\n self.message_queue.put(message)", "def receive(self, message, **kwargs):\n self.queue.put((message, kwargs))", "def _queue_cmd(self, cmd, callback=None):\n future = _FutureCommand(cmd, callback)\n self._queue.put_nowait(future)\n return future", "def queue_scan_command(self, server_info, scan_command):\r\n # type: (ServerConnectivityInfo, PluginScanCommand) -> None\r\n # Ensure we have the right processes and queues in place for this hostname\r\n self._check_and_create_process(server_info.hostname)\r\n\r\n # Add the task to the right queue\r\n self._queued_tasks_nb += 1\r\n if scan_command.is_aggressive:\r\n # Aggressive commands should not be run in parallel against\r\n # a given server so we use the priority queues to prevent this\r\n self._hostname_queues_dict[server_info.hostname].put((server_info, scan_command))\r\n else:\r\n # Normal commands get put in the standard/shared queue\r\n self._task_queue.put((server_info, scan_command))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the chat client application loop. When this function exits, the application will stop
def run_chat_client():
    while must_run:
        print_menu()
        action = select_user_action()
        perform_user_action(action)
    print("Thanks for watching. Like and subscribe! 👍")
[ "def MainLoop(self):\n self.pleaseQuit=0\n\n self.logger.info(\"Starting main eventloop\")\n try:\n self.irc.process_forever(1)\n except KeyboardInterrupt:\n self.logger.warn(\"Received interrupt, disconnecting from irc\")\n #self.irc.disconnect_all(\"^C received\")\n self.irc.disconnect_all(\"even de suiker bijvullen\")\n \n self.logger.info(\"Finished disconnecting, shutting down\")", "def startListening(self):\n \n self.listener_thread = threading.Thread(target=self.listening, daemon=True)\n self.listener_thread.start()\n\n # stateupdate = threading.Thread(target=self.showStatus, daemon=True)\n # stateupdate.start()\n\n # Main App Loop (Keeps the Client opened)\n while self.listener_thread.is_alive():\n time.sleep(1)\n else:\n print('Shutting Main Thread-1')\n sys.exit()", "def run(self):\n\n if self.purpose == \"send_messages\":\n self.start_outgoing_messages_loop()\n if self.purpose == \"receive_messages\":\n self.start_incoming_messages_loop()", "def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()", "def run_loop(self):\r\n server_log.info('Server now accepting client connections.')\r\n while not self.clients_done():\r\n asyncore.loop(timeout=config[\"server_timeout\"], count=config[\"server_loop_count\"])", "def start_loop(self):\n\n self.MainLoop()", "def run(self):\r\n try:\r\n self.listen()\r\n except socket.error:\r\n self.close()", "def mainloop(self):\n \n while True:\n # It calls repeteadly the reactor\n # update method.\n try:\n self.update()\n except Kill:\n # It breaks the loop\n # silently.\n # people implementing reactors from other mainloop\n # should implement this try: catch\n # suitably to their needs.\n\n break\n except KeyboardInterrupt:\n print self.base\n raise", "def run(self):\r\n self.client.connect()\r\n self.client.run()", "def run(self) -> None:\n self.mainloop()", "def message_loop(current_user: object, username: str, topic: str) -> None:\n user_input = input(\"Welcome to the chatroom, \" + username + \"\\nThe topic is \" +\n topic + \"\\nType 'quit' to disconnect\\n\")\n while user_input != 'quit':\n send_message(current_user, username, topic, user_input)\n user_input = input() \n disconnect(current_user, topic)\n return", "def main(self):\n gtk.gdk.threads_init()\n self.client.start()\n gtk.main()", "def run(self):\n log('Now running')\n\n self.last_action = 'Stop'\n\n while True:\n msg = self.server.receive()\n self.__process__(msg)", "def start_event_loop(self):", "def run(self):\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n self.client.on_subscribe = self.on_subscribe\n self.client.on_publish = self.on_publish\n # self.client.on_log = self.on_log\n # self.client.enable_logger(logger)\n self.client.reconnect_delay_set(min_delay=0.3, max_delay=120)\n try:\n self.client.connect(self.broker_adress, int(self.broker_port), 60)\n except:\n txt = 'Broker with ip-adress {} on port {} not found'.format(self.broker_adress, self.broker_port)\n logger.error(txt)\n raise Exception(txt)\n # sys.exit()\n # self.client.loop_forever()\n self.client.loop_start()", "def handle_chat(self):\n while True:\n if self.chat_breakout:\n return\n\n time.sleep(1)\n messages = \"\"\n for i in range(5):\n try:\n messages += f\"{self.queue.popleft()}\\n\"\n except IndexError:\n # Queue is empty but no worries\n continue\n\n if messages != \"\":\n 
self.loop.create_task(\n self.ingame_cog.send_chat_to_discord(\n self.bot, self.channel, messages\n )\n )", "def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r", "def start_event_loop(self, application):\n import sys\n sys.exit( application.exec_() )", "def mainloop(self) -> None:\n while True:\n try:\n updates = self.get_updates_list()\n except BadResponseError:\n continue\n\n for update in updates:\n try:\n update = self.get_update_object(update)\n except UnsupportedMessageType:\n self.log_write(\"Unsupported message type! JSON:\\n\" + str(update.message_json))\n self.send_message(update.chat_id, \"What is it?\")\n self._inc_offset()\n continue\n\n if update.message_type == TEXT:\n self.on_text(update)\n\n elif update.message_type == STICKER:\n self.log_write(update.user_name + \" sent sticker\")\n self.on_sticker(update)\n\n else:\n self.send_message(update.chat_id, \"Use /help\")\n\n self._inc_offset()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the menu showing the available options
def print_menu():
    print("==============================================")
    print("What do you want to do now? ")
    print("==============================================")
    print("Available options:")
    i = 1
    for a in available_actions:
        if current_state in a["valid_states"]:
            # Only hint about the action if the current state allows it
            print(" %i) %s" % (i, a["description"]))
        i += 1
    print()
[ "def _print_menu(self):\n # Create header line.\n header = \"%s Menu:\" % (self.__name)\n header = header.title()\n print(header)\n\n # Show the iterations counter.\n iterations = self._status.get_value(\"iterations\")\n print(\"(Iteration %d)\" % (iterations))\n\n self._print_custom()\n\n # Display the options alphabetically.\n option_names = list(self.__options.keys())\n option_names.sort()\n for option in option_names:\n desc, command = self.__options[option]\n print(\"\\t%s: %s\" % (option, desc))", "def print_admin_options():\n print(\"\\n******************* ADMINISTRATOR'S MENU *****************\"\n \"\\n\\nChoose a number from 1 to 4 from the below options:\"\n \"\\n\\n# 1: Users management\\n\\n# 2: Class management\\n\\n# 3: School information\\n\\n# 4: Quit\"\n \"\\n**********************************************************\")", "def print_menu():\n print()\n print(\"Main Menu\")\n print(\"---------\")\n print(\"1 - Process a new data file\")\n print(\"2 - Choose units\")\n print(\"3 - Edit room filter\")\n print(\"4 - Show summary statistics\")\n print(\"5 - Show temperature by date and time\")\n print(\"6 - Show histogram of temperatures\")\n print(\"7 - Quit\")\n print()", "def menuDisplay(self):\r\n \r\n print(\"\\n\" \"Select the items from menu\")\r\n print(\"\\n\" \"********** MENU CARD **********\")\r\n print(\"ITEM \\t\\t PRICE\")\r\n\r\n self.menu = {\"Burger\":120, \"Pizza\":200, \"Hotdog\":150,\r\n \"Parata\":50, \"Donut\":80, \"Nugget\":50,\r\n \"Paneer\":120, \"Pakora\":80, \"Coffee\":25}\r\n\r\n for k,v in self.menu.items():\r\n print(k,\":\", \"\\t\", \"Rs\",v)", "def printMenu():\n # tWelc = PrettyTable(['Welcome to the CLI-of the repository classifier'])\n print('Welcome to the CLI of the repository classifier')\n print(strStopper1)\n t = PrettyTable(['Action', ' Shortcut '])\n t.add_row(['Show Menu', '- m -'])\n t.add_row([' Predict repositories form txt-file ', '- i -'])\n t.add_row(['Input URL', '- u -'])\n t.add_row(['Show Info', '- f -'])\n t.add_row(['Train Model', '- t -'])\n t.add_row(['set GitHub-Token', '- g -'])\n t.add_row(['Help', '- h -'])\n t.add_row(['Quit', '- q -'])\n print(t)\n print('')", "def menu_cust(self):\n intro = \"Here are the options available for you to choose from:\"\n option1 = \"[1] UNLOCK THE CAR\"\n option2 = \"[2] RETURN THE CAR\"\n option3 = \"[3] BACK\"\n print(intro, option1, option2, option3, sep='\\n')", "def print_menu(options, header=\"\"):\n if header != \"\":\n print(header)\n print(\"-\" * len(header))\n i = 1\n for option in options:\n print(str(i) + \". 
\" + option)\n i += 1", "def print_user_options():\n print(\"\\n******************* USER'S MENU *****************\"\n \"\\n\\nChoose a number from 1 to 2 from the below options:\"\n \"\\n\\n# 1: Show all teachers\\n\\n# 2: Quit\"\n \"\\n*************************************************\")", "def print_startup_menu():\n menu = \"Menu: \\n\"\n for key in sorted(const.USER_MENU_OPTIONS):\n temp_dict = const.USER_MENU_OPTIONS.get(key)\n menu += key + \": \" + temp_dict.get(const.USER_MENU_OPTION_HELP_TEXT_KEY) + \"\\n\"\n print(menu)", "def print_teacher_options():\n print(\"\\n******************* TEACHER'S MENU *****************\"\n \"\\n\\nChoose a number from 1 to 3 from the below options:\"\n \"\\n\\n# 1: Show all study classes\\n\\n# 2: Show all teachers\\n\\n# 3: Quit\"\n \"\\n\\n****************************************************\")", "def analysis_menu():\n print()\n print(\"Menu Options:\")\n print(\"-------------\")\n print(\"Enter \\\"B\\\" for book page information\\nEnter \\\"S\\\" for statistical analysis\\nEnter \\\"A\\\" for additional analysis\\nEnter \\\"V\\\" for visualizations\\nEnter \\\"F\\\" to save results to a file\\nEnter \\\"M\\\" to return to main menu\")", "def menu_eng(self):\n intro = \"Here are the options available for you to choose from\"\n option1 = \"[1] UNLOCK BY CREDENTIALS\"\n option2 = \"[2] UNLOCK BY QR CODE\"\n option3 = \"[3] UNLOCK WITH BLUETOOTH\"\n option4 = \"[4] BACK\"\n print(intro, option1, option2, option3, option4, sep='\\n')", "def main_menu():\n print()\n print(\"Menu Options:\")\n print(\"-------------\")\n print(\"Enter \\\"A\\\" for analysis\\nEnter \\\"H\\\" for help\\nEnter \\\"Q\\\" to quit\")", "def display_menu(self):\n print 57 * '#'\n print '# WELCOME TO THE PYTHON/WEBDEV WALL #'\n print '# ---------------------------------------------- #'\n print '# Here, we will dial in on your python skills by #'\n print '# providing an interactive and fun way to really #'\n print '# understand how this language functions. #'\n print 57 * '#'\n print ''", "def allOptions():\r\n\tprint( )\r\n\ttime.sleep(0.1)\r\n\tprint( \"(1) Guest Information\" )\r\n\ttime.sleep(0.1)\r\n\tprint( \"(2) Make a Reservation\" )\r\n\ttime.sleep(0.1)\r\n\tprint( \"(3) Print Bill\" )\r\n\ttime.sleep(0.1)\r\n\tprint( \"(4) Quit!\" )\r\n\ttime.sleep(0.1)", "def display_menu():\n print(\"\"\"\n Menu of Actions:\n 1) Send a Thank You\n 2) Create a Report\n 3) Quit\n \\n\"\"\")", "def print_class_menu():\n print(\"\\n*********** INFORMATION ABOUT STUDY CLASSES *********\"\n \"\\n\"\n \"\\nChoose a number from 1 to 5 from the below options:\"\n \"\\n\"\n \"\\n# 1: Show all study classes\\n\\n# 2: Create new study class\\n\\n# 3: Edit study class name\"\n \"\\n\\n# 4: Delete study class\\n\\n# 5: Quit\"\n \"\\n*****************************************************\")", "def printCurrentOptions(self):\n if self.comm.rank == 0:\n print('+---------------------------------------+')\n print('| All %s Options: |' % self.name)\n print('+---------------------------------------+')\n # Need to assemble a temporary dictionary\n tmpDict = {}\n for key in self.options:\n tmpDict[key] = self.getOption(key)\n pp(tmpDict)", "def mostrar_menu():\n print('------\\nMenú principal \\n------')\n imprimir_opciones(LISTA_OPCIONES)\n print('------')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ask the user to choose an action by entering the index of the action
def select_user_action():
    number_of_actions = len(available_actions)
    hint = "Enter the number of your choice (1..%i):" % number_of_actions
    choice = input(hint)
    # Try to convert the input to an integer
    try:
        choice_int = int(choice)
    except ValueError:
        choice_int = -1
    if 1 <= choice_int <= number_of_actions:
        action = choice_int - 1
    else:
        action = None
    return action
[ "def take_action(self, action_index):\n assert action_index < len(self.ACTION_LOOKUP)\n action = self.ACTION_LOOKUP[action_index]\n # print(action)\n return", "def _take_action(self, action_index):\n assert action_index < len(self.ACTION_LOOKUP)\n self.action = action_index\n return self.action", "def choose_action(self, board, possible_actions):\r\n pass", "def choose_action(self, state):\n pass", "def get_action(self, choice):\n return self.get_option(choice).action", "def choose_action(self, valid_list):\n \n action_str = input(\"Choose action: \").lower()\n print()\n \n if action_str in valid_list:\n return action_str\n \n else:\n print(\"Invalid action!\")\n return False", "def choose_action(self, node):", "def action(self,input,session,context):\n #index = int(input) - 1\n #if index < 0:\n # raise IndexError('Menu option can not be less than 1')\n def make_index(elt):\n idx, item = elt\n if item.custom_index is not None: return str(item.custom_index)\n else: return str(idx)\n\n valid_inputs = map(make_index, enumerate(self.menu_items))\n index = valid_inputs.index(input)\n\n return self.menu_items[index].next_screen", "def select_action(self, suggested_action):\n raise NotImplementedError", "def ChooseAction(self):\n self.lastAction = None\n self.lastState = None\n if(self.attention is None or self.attention == \"\"): return\n # find best action for the currently attended node\n actions = list(self.vi.Q[self.states.index(self.attention)])\n actionIndex = actions.index(max(actions))\n actionName = self.actions[actionIndex]\n # execute the best action for the currently attended node\n self.nodes[actionName].Activate()\n self.lastAction = actionName\n self.lastState = self.attention", "def index_to_action(self, index):\n assert 0 <= index < self.total_actions\n return self.actions_list[index]", "def action(self,input,session,context):\n index = int(input) - 1\n if index < 0:\n raise IndexError('Menu option can not be less than 1')\n return self.menu_items[index].next_screen", "def call_action(self, input):\n if input == 'field':\n self.new_field()\n elif input == 'harvest':\n self.harvest()\n elif input == 'status':\n self.status()\n elif input == 'relax':\n self.relax()\n elif input == 'pasture':\n self.pasture()\n elif input == 'exit':\n quit()\n else:\n print(\"Invalid input, please enter one of the listed options.\")\n pass", "def onActionChosen(self, agent, action):\n\n pass", "def getSelectedAction(self):\n pass", "def pick_action(self):\n print(\"\")\n print(\"Current: \" + self.current if Path(self.current).exists() else \"Current config file doesn't exist.\")\n print(\"Pacnew: \" + self.pacnew if Path(self.pacnew).exists() else \"Pacnew/pacsave file doesn't exist.\")\n\n response = self.action_picker.pick()[\"key\"]\n\n if response == \"q\":\n self._quit()\n elif response == \"p\":\n self.pick_file()\n elif response == \"rc\":\n run(self._get_cmd(\"rm\") + [self.current])\n elif response == \"rp\":\n run(self._get_cmd(\"rm\") + [self.pacnew])\n elif response == \"eb\":\n run(self._get_cmd(\"edit\") + [self.current, self.pacnew])\n elif response == \"ec\":\n run(self._get_cmd(\"edit\") + [self.current])\n elif response == \"en\":\n run(self._get_cmd(\"edit\") + [self.pacnew])\n elif response == \"r\":\n run(self._get_cmd(\"mv\") + [self.pacnew, self.current])\n elif response == \"d\":\n run(self._get_cmd(\"diff\") + [self.pacnew, self.current])", "def choice_stay_return(self, text, action):\n while True:\n print(\"\"\"\n 0. Back to the main menu\n 1. 
{}\n \"\"\".format(text))\n choice = pyip.inputNum('Enter a number: ')\n if choice == 0:\n # Clean up the console\n self.clear_console()\n # Gives the options that can be selected in the menu\n self.run()\n elif choice == 1:\n action()\n else:\n print('Please, choose number 0 or 1')", "def act(self, state):\n suggested_action = self.learning_element.act(state)\n selected_action = self.exploration_element.select_action(suggested_action)\n return selected_action", "def action(self, e):\n if \"SELECTION\" in self.action_functions:\n self.action_functions[\"SELECTION\"]()\n for event in e:\n if event.type in self.action_functions:\n self.action_functions[event.type]()\n if event.type == KEYDOWN:\n if event.key in self.action_functions:\n self.action_functions[event.key]()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input `text` into the text field on the page.
def enter_text(self, text):
        self.q(css='#fixture input').fill(text)
[ "def set_text(self, text):\n self.text = text", "def ui_input_text() -> str:\n\ttext = input('enter your text ')\n\treturn text", "def input_text_basic(self, locator, text):\n self.element = self._element_finder(locator)\n if self.element:\n self.element.send_keys(text)\n log.mjLog.LogReporter(\"WebUIOperation\",\"debug\",\"input_text_basic operation successful- %s\" %(locator))", "def input_text(self,locator, Text):\n self.element = self._element_finder(locator)\n if self.element:\n self.element.clear()\n self.element.send_keys(Text)\n log.mjLog.LogReporter(\"WebUIOperation\",\"debug\",\"Input_text operation \\\n successful- %s\" %(locator))", "def type_text(self, element, text):\n try:\n if element.is_displayed():\n element.clear()\n element.send_keys(text)\n print(text + \" is added to textbox\")\n else:\n print(element + \" is not displaying\")\n except Exception as e:\n print(str(e))", "def text(self, text):\n if text is None:\n raise ValueError(\"Invalid value for `text`, must not be `None`\")\n\n self._text = text", "def text(self, selector, text):\n\n self.interface.dispatch({ 'name': 'text', 'selector': selector, 'text': text })", "def inp(text):\r\n input(text)", "def text(self, text, enter=True):\n self.ime.text(text)\n\n if enter:\n self.adb.shell_command(\"input keyevent ENTER\")", "def set_input_line(self, text):\n self.cont.getControl(\"edit_input\").getModel().Text = text", "def __send_text_in_browser(self, text):\n\n actions = ActionChains(self.browser)\n actions.send_keys(text)\n actions.perform()", "def text_value(self, text_value):\n\n self._text_value = text_value", "def add_text(self, text):\n self.text = self.text + text", "def add_text(self, text):\n if self.current_node:\n #if text != \"\":\n self.current_node.params[\"text\"] = text", "def enter_into_field(field_name, text):\n input_el = finder.find_element_by_name(field_name)\n _clear_and_send_keys_to_element(input_el, text)", "def set_text(self, text):\n self.widget.setText(text)", "def set_text(self, text: Union[str, int, float]):\n if type(text) == int or type(text) == float:\n text = str(text)\n self._text = text\n self.dirty = 1", "def add_text(self, text):\n placeholder = self.page.placeholders.get(slot='content')\n add_plugin(placeholder, 'TextPlugin', 'en', body=text)\n # Immediately publish the changes\n publish_page(self.page, self.admin, 'en')", "def displayText(self):\n if self.entryWidget.get().strip() == \"\":\n tkMessageBox.showerror(\"Tkinter Entry Widget\", \"Enter a text value\")\n else:\n self.file_com.write(self.entryWidget.get().strip()+'\\n')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select the car with ``car_value`` in the dropdown list.
def select_car(self, car_value):
        self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click()
[ "def select_by_value(self, value):\n if value:\n self.browser.selenium.find_element_by_xpath(f\"//select/option[@value='{value}']\").click()", "def is_car_selected(self, car):\n return self.q(css=u'select[name=\"cars\"] option[value=\"{}\"]'.format(car)).selected", "def select_dropdown_value(self, label, value):\n locator = f\"label:{label}\"\n try:\n element = self.selenium.get_webelement(locator)\n except (NoSuchElementException, TimeoutException, ElementNotFound):\n raise ElementNotFound(f\"Form element with label '{label}' was not found\")\n\n self.salesforce.scroll_element_into_view(locator)\n handler = get_form_handler(element, locator)\n try:\n handler.set(value)\n except (NoSuchElementException, TimeoutException, ElementNotFound):\n raise ElementNotFound(f\"Dropdown value '{value}' not found\")", "def select_option(self, selector, value):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support.ui import Select\n\n select = Select(self.selenium.find_element(By.CSS_SELECTOR, selector))\n select.select_by_value(value)", "def select_drop_down_item_by_text(self, value):\n self.wait_element()\n sel = Select(self.web_element())\n sel.select_by_visible_text(value)\n self.wait_until_ajax_complete()", "def select_drop_down(self, drop_down_label_name, value):\n utils.log(\"select value:[{}] for Drop Down Box:[{}]\".format(value, drop_down_label_name))\n self.slb.select_drop_down_list(\n report_schedule.drop_down_trigger.format(drop_down_label_name),\n report_schedule.drop_ul_li,\n value)", "def select_option(select_element, value):\n select = Select(select_element)\n select.select_by_value(value)", "def select_drop(web_driver, id, value):\r\n select = Select(web_driver.find_element_by_id(id))\r\n select.select_by_value(value)\r\n # Returns nothing\r", "def select_drop_down_item_by_index(self, value):\n self.wait_element()\n sel = Select(self.web_element())\n sel.select_by_index(value)\n self.wait_until_ajax_complete()", "def __set__(self, obj, value):\n driver = obj.driver\n element = WebDriverWait(driver, TIMEOUT).until(\n EC.presence_of_element_located((By.NAME, self.locator))\n )\n for option in element.find_elements_by_tag_name('option'):\n if option.text == value:\n option.click()\n break", "def set_selection(self, value):\n if value is not None:\n if is_int(value):\n self.combobox.SetSelection(value)", "def set_select(self, val):\n self.select = val\n return self", "def selectCarPrice(self, car):\n\n # Create a result variable to store the upgrade object.\n # Call the fetchSumOfUpgradePrice in DAO class and pass the selected car registration-\n # number to get the sum of selected car upgrades price.\n upgrade = self.sellerDao.fetchSumOfUpgradePrice(car.getRegNo())\n\n # Get the sum of upgrade price from upgrade object.\n # If the sum of upgrade price is none, then it will change to zero\n if upgrade.getPrice() is None:\n upgradePrice = 0\n else:\n upgradePrice = upgrade.getPrice()\n\n carPrice = self.sellerDao.fetchCarPrice(car.getRegNo())\n\n return upgradePrice + carPrice", "def link_to_choice(dropdown_value):\n return dropdown_value", "def selected_value(self, selected_value):\n for option in self._options_iter:\n if option.value == selected_value:\n self.selected_option = option\n break\n else:\n raise ValueError(\n \"no option with value '{}' found\".format(selected_value)\n )", "def selectOptionByValue(self, element_tuple, select_value):\n self.log_info(f\"Browser.selectOptionByValue: Setting {element_tuple} to {select_value}\")\n 
Select(self.CORE.find_element(*self.format_element(element_tuple))).select_by_value(select_value)\n return", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def test_select_box():\n with SeleniumDriver(\"firefox\", headless=True) as obj:\n obj.get(TEST_URL)\n\n select_value = \"1\"\n obj.fill({\"select_dropdown\": select_value})\n element = obj.element(\"select_dropdown\", \"name\")\n for ele in element.find_elements_by_tag_name(\"option\"):\n if ele.text == \"One\":\n assert ele.is_selected() is True", "def perform_as(self, the_actor: Actor) -> None:\n if self.target is None:\n raise UnableToAct(\n \"Target was not provided for SelectByValue. Provide a target using the \"\n \".from_() or .from_the() methods.\"\n )\n\n element = self.target.found_by(the_actor)\n select = SeleniumSelect(element)\n try:\n select.select_by_value(self.value)\n except WebDriverException as e:\n msg = (\n \"Encountered an issue while attempting to select the option with value \"\n f\"{self.value} from {self.target}: {e.__class__.__name__}\"\n )\n raise DeliveryError(msg).with_traceback(e.__traceback__)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the given ``car`` is selected, ``False`` otherwise.
def is_car_selected(self, car):
        return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected
[ "def is_selected(self, term):\n found = self.frame.radio(label=term)\n if found.exists:\n return found.is_selected\n raise UnknownObjectException('Unable to locate radio matching {}'.format(term))", "def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False", "def is_selected(self) -> bool:\r\n return self.selected", "def is_selected(self):\n is_selected = bool(RPR.IsTrackSelected(self.id))\n return is_selected", "def IsSelected(self, item):\r\n\r\n return item.IsSelected()", "def is_select(self):\n return self._is_select", "def selected(self) -> bool:\n return self.__selected", "def is_selected(self, by, value):\n we = self.find_element(by, value)\n return we.is_selected()", "def _can_select(self):\n return self.multiple_selection or len(self.regions) < 1", "def is_selected(self, part):\r\n if isinstance(part, int):\r\n part_id = part \r\n else:\r\n part_id = part.part_id\r\n for selected_part_id in self.selects:\r\n if part_id == selected_part_id:\r\n return True\r\n \r\n return False", "def is_multi_selection(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_multi_selection\")", "def has_selection(self, name=\"default\"):\n return self.get_selection(name) is not None", "def objectIsSelected(self):\n if len(self.options.ids) == 0:\n inkex.errormsg((\"Please select an object.\"))\n exit()\n return True", "def hasSelection(self):\n return bool(self._selection)", "def select(condition: Union[Callable, int], meta: Counter) -> bool:\n if condition is None:\n return True\n elif isinstance(condition, int):\n return sum(meta.values()) == condition\n elif callable(condition):\n if not isinstance(condition(meta), bool):\n raise TypeError('selection condition expected to return a boolean')\n return condition(meta)\n return False", "def add_car(self, car):\n car_coords = car.car_coordinates()\n for coord in car_coords:\n if coord not in self.cell_list(): # not in 7*7 board\n return False\n elif self.cell_content(coord) is not None:\n return False\n for old_car in self.__cars:\n if old_car.get_name() == car.get_name():\n return False\n self.__cars.append(car)\n return True", "def isCarAvailable(self, car, start, end):\n rentals = self.filterRentals(None, car)\n for rent in rentals:\n if start > rent.end or end < rent.start:\n continue\n return False\n return True", "def selected(self):\n\n return self.element().is_selected() if self.exists() else False", "def select_car(self, car_value):\n self.q(css=u'select[name=\"cars\"] option[value=\"{}\"]'.format(car_value)).first.click()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle the box for the pill with `pill_name` (red or blue).
def toggle_pill(self, pill_name):
        self.q(css=u"#fixture input#{}".format(pill_name)).first.click()
[ "def click_pill(self, pill):\n if pill < len(self.pills):\n return self.pills[pill].click()\n else:\n return f\"No pill {pill} found?\"", "def toggle_color_picker(self, wid, color_name='square_fill_ink'):\n print(\"TOGGLE COLOR PICKER\", getattr(wid, color_name), self.color_picker)\n is_open = self.color_dropdown and self.color_dropdown.attach_to\n if is_open:\n self.color_dropdown.dismiss()\n if self.color_dropdown:\n self.color_picker.unbind(color=wid.setter(color_name))\n self.color_picker = None\n self.color_dropdown = None\n if not is_open:\n self.color_dropdown = Factory.ColorPickerDD()\n self.change_flow(id_of_flow('suggest'))\n self.color_dropdown.open(wid)\n self.color_picker = self.color_dropdown.ids.col_pic\n self.color_picker.color = getattr(wid, color_name)\n self.color_picker.bind(color=wid.setter(color_name))", "def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")", "def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to '{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)", "def _checkbutton_toggle(self):\n new_value = self.value_checkbutton.var.get()\n if self.master.change_field_value(self.field_name, new_value):\n self.value_checkbutton.config(fg=\"#3F3\" if new_value else \"#F33\", text=\"ON\" if new_value else \"OFF\")\n else:\n self.value_checkbutton.var.set(not new_value)", "def led_toggle(self):\n if self.state == GPIO.LOW:\n self.state = GPIO.HIGH\n else:\n self.state = GPIO.LOW\n return self.update_status()", "def _change_color(self):\n\t\tif self.state == LED.ON_STATE:\n\t\t\tcolor=\"green\"\n\t\telse:\n\t\t\tcolor=\"red\"\n\t\tself.c.itemconfig(self.led, fill=color)", "def color_toggle(self):\n \n if self.color_scheme_table.active_scheme_name == 'NoColor':\n self.color_scheme_table.set_active_scheme(self.old_scheme)\n self.Colors = self.color_scheme_table.active_colors\n else:\n self.old_scheme = self.color_scheme_table.active_scheme_name\n self.color_scheme_table.set_active_scheme('NoColor')\n self.Colors = self.color_scheme_table.active_colors", "def pin_toggle(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n port_state = gpio.HIGH\n if gpio.input(port_num) == gpio.HIGH:\n port_state = gpio.LOW\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, port_state)", "def change(self):\n # First, determine which color should be next\n if self.color == 'red':\n new_color = 'green'\n elif self.color == 'green':\n new_color = 'yellow'\n elif self.color == 'yellow':\n new_color = 'red'\n # Next, activate and deactivate the appropriate lamps\n self.lamps[self.color].turn_off()\n self.color = new_color\n self.lamps[self.color].turn_on()", "def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)", "def ToggleSpinner(event, state, widget):\n if state == True:\n widget.Enable()\n else:\n widget.Disable()\n event.Skip()", "def toggle(self):\n if not self.pressed:\n self.button.config(relief=SUNKEN, bg='red')\n self.pressed = True\n self.check_bingo() # bingo is checked every time a button is activated (pressed)\n else: # bingo is not checked when a button has been depressed\n self.button.config(relief=RAISED, bg='white')\n self.pressed 
= False", "def change_green(self):\n self._lbl.config(text=\"This label is green\", bg=\"green\")", "def create_round_toggle_assets(self, colorname):\n if colorname == DEFAULT:\n prime_color = self.colors.primary\n else:\n prime_color = self.colors.get(colorname)\n\n on_border = prime_color\n on_indicator = self.colors.selectfg\n on_fill = prime_color\n\n if self.is_light_theme:\n off_border = self.colors.selectbg\n off_indicator = self.colors.selectbg\n disabled_fg = Colors.update_hsv(self.colors.inputbg, vd=-0.2)\n else:\n off_border = self.colors.inputbg\n off_indicator = self.colors.inputbg\n disabled_fg = self.colors.inputbg\n\n off_fill = self.colors.bg\n toggle_off = Image.new(\"RGBA\", (226, 130))\n draw = ImageDraw.Draw(toggle_off)\n draw.rounded_rectangle(\n xy=[1, 1, 225, 129],\n radius=(128 / 2),\n outline=off_border,\n width=6,\n fill=off_fill,\n )\n draw.ellipse([20, 18, 112, 110], fill=off_indicator)\n\n toggle_on = Image.new(\"RGBA\", (226, 130))\n draw = ImageDraw.Draw(toggle_on)\n draw.rounded_rectangle(\n xy=[1, 1, 225, 129],\n radius=(128 / 2),\n outline=on_border,\n width=6,\n fill=on_fill,\n )\n draw.ellipse([20, 18, 112, 110], fill=on_indicator)\n toggle_on = toggle_on.transpose(Image.ROTATE_180)\n\n toggle_disabled = Image.new(\"RGBA\", (226, 130))\n draw = ImageDraw.Draw(toggle_disabled)\n draw.rounded_rectangle(\n xy=[1, 1, 225, 129],\n radius=(128 / 2),\n outline=disabled_fg,\n width=6\n )\n draw.ellipse([20, 18, 112, 110], fill=disabled_fg)\n\n image_names = []\n for im in [toggle_on, toggle_off, toggle_disabled]:\n _im = ImageTk.PhotoImage(im.resize((24, 15), Image.LANCZOS))\n _name = _im._PhotoImage__photo.name\n image_names.append(_name)\n self.theme_images[_name] = _im\n\n return image_names", "def toggle_color(self, index):\n if self.get_state(index):\n self.canvas.itemconfigure(self.cells[index], state=HIDDEN)\n else:\n self.canvas.itemconfigure(self.cells[index], state=NORMAL)", "def turn_on(color, duration=None):\n if has_gpio:\n pin = get_pin(color)\n gpio.output(pin, True)\n if duration:\n time.sleep(duration)\n gpio.output(pin, False)\n else:\n msg = \"Turn on %s light\" % color\n if duration:\n msg += \" for %d seconds\" % duration\n print msg", "def change_to_tasks(self):\n self.ids[\"shp_btn\"].color = 1, 1, 1, 0.5", "def toggle(self):\n self._led_request(self.LED_REQUEST_TOGGLE)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click the ``Confirm`` button and confirm the dialog.
def confirm(self):
        with self.handle_alert(confirm=True):
            self.q(css='button#confirm').first.click()
[ "def confirm_dialog_box():\n alert = world.browser.switch_to.alert\n alert.accept()", "def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return", "def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed", "def __window_confirm(self, text):\n return True", "def confirm_cancel(self, button):\n if not self.confirm_window_open:\n raise Exception('confirmation window is not open')\n self.confirm_buttons.find_buttons()\n self.confirm_buttons.click(button)\n sleep(1)\n self.confirm_window_open = False", "def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()", "def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()", "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def buttonBox_accepted(self):\n # just close the dialog\n self.ok = True\n self.close()", "def click_button_ok(self):\n # AutoGen method\n self.click_element(self.BUTTON_OK)", "def _confirm(self, confirmation, **kwargs):\n pass", "async def confirm_step(\n self, step_context: WaterfallStepContext\n ) -> DialogTurnResult:\n showlist_details = step_context.options\n\n # Capture the results of the previous step\n message_text = \"\"\n if \"Incidents\" in showlist_details.alist or \"Outages\" in showlist_details.alist:\n message_text = (\n f\"Please confirm, you want to see the list of recent { showlist_details.alist }.\"\n )\n elif \"Changes\" in showlist_details.alist:\n message_text = (\n f\"Please confirm, you want to see the list of { showlist_details.alist } for Service Tree GUID {showlist_details.guid} in the last {showlist_details.age} days.\"\n )\n elif 
\"Incident\" in showlist_details.alist:\n message_text = (\n f\"Please confirm, you need help on { showlist_details.alist } for IncidentId {showlist_details.incidentid}.\"\n )\n else:\n message_text = (\n f\"Some needed info is missing. Let us do it again.\"\n )\n prompt_message = MessageFactory.text(\n message_text, message_text, InputHints.expecting_input\n )\n\n # Offer a YES/NO prompt.\n return await step_context.prompt(\n ConfirmPrompt.__name__, PromptOptions(prompt=prompt_message)\n )", "def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit", "def confirm_quit(self):\n\t\tself.ask_quit()\n\t\tself.ask_save()\n\t\treturn True", "def PresentDialog_Confirm_Call( message ):\n return call( message, [ 'Ok', 'Cancel' ] )", "def click_boton_ok(self):\n self.button.click(vacaciones_crear_catalog.BTN_OK)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }